var/home/core/zuul-output/logs/kubelet.log:
Nov 28 11:08:39 crc systemd[1]: Starting Kubernetes Kubelet...
Nov 28 11:08:40 crc restorecon[4637]: Relabeled /var/lib/kubelet/config.json from system_u:object_r:unlabeled_t:s0 to system_u:object_r:container_var_lib_t:s0
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/device-plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/device-plugins/kubelet.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/volumes/kubernetes.io~configmap/nginx-conf/..2025_02_23_05_40_35.4114275528/nginx.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/22e96971 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/21c98286 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/0f1869e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/46889d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/5b6a5969 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/6c7921f5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4804f443 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/2a46b283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/a6b5573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4f88ee5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/5a4eee4b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/cd87c521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/38602af4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/1483b002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/0346718b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/d3ed4ada not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/3bb473a5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/8cd075a9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/00ab4760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/54a21c09 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/70478888 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/43802770 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/955a0edc not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/bca2d009 not reset as customized by admin to system_u:object_r:container_file_t:s0:c140,c1009
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/b295f9bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/bc46ea27 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5731fc1b not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5e1b2a3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/943f0936 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/3f764ee4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/8695e3f9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/aed7aa86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/c64d7448 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/0ba16bd2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/207a939f not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/54aa8cdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/1f5fa595 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/bf9c8153 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/47fba4ea not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/7ae55ce9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7906a268 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/ce43fa69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7fc7ea3a not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/d8c38b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/9ef015fb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/b9db6a41 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/b1733d79 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/afccd338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/9df0a185 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/18938cf8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/7ab4eb23 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/56930be6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_35.630010865 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/0d8e3722 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/d22b2e76 not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/e036759f not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/2734c483 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/57878fe7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/3f3c2e58 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/375bec3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/7bc41e08 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/48c7a72d not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/4b66701f not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/a5a1c202 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_40.1388695756 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/26f3df5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/6d8fb21d not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/50e94777 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208473b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/ec9e08ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3b787c39 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208eaed5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/93aa3a2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3c697968 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/ba950ec9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/cb5cdb37 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/f2df9827 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/fedaa673 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/9ca2df95 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/b2d7460e not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2207853c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/241c1c29 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2d910eaf not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/c6c0f2e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/399edc97 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8049f7cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/0cec5484 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/312446d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c406,c828
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8e56a35d not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/2d30ddb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/eca8053d not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/c3a25c9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c168,c522
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/b9609c22 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/e8b0eca9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/b36a9c3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/38af7b07 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/ae821620 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/baa23338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/2c534809 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/59b29eae not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/c91a8e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/4d87494a not reset as customized by admin to system_u:object_r:container_file_t:s0:c442,c857
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/1e33ca63 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/8dea7be2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d0b04a99 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d84f01e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/4109059b not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/a7258a3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/05bdf2b6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/f3261b51 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/315d045e not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/5fdcf278 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/d053f757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/c2850dc7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fcfb0b2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c7ac9b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fa0c0d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c609b6ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/2be6c296 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/89a32653 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/4eb9afeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/13af6efa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/b03f9724 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/e3d105cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/3aed4d83 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/0765fa6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/2cefc627 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/3dcc6345 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/365af391 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b1130c0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/236a5913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b9432e26 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/5ddb0e3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/986dc4fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/8a23ff9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/9728ae68 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/665f31d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/136c9b42 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/98a1575b not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/cac69136 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/5deb77a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/2ae53400 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/e46f2326 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/dc688d3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/3497c3cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/177eb008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/af5a2afa not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/d780cb1f not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/49b0f374 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/26fbb125 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 28 11:08:40 crc restorecon[4637]:
/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/cf14125a not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/b7f86972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/e51d739c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/88ba6a69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/669a9acf not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/5cd51231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/75349ec7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/15c26839 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/45023dcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/2bb66a50 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/64d03bdd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/ab8e7ca0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/bb9be25f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: 
/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/9a0b61d3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/d471b9d2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/8cb76b8e not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/11a00840 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/ec355a92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/992f735e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 11:08:40 crc 
restorecon[4637]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d59cdbbc not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/72133ff0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/c56c834c not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d13724c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/0a498258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa471982 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fc900d92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa7d68da not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/4bacf9b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/424021b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/fc2e31a3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/f51eefac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/c8997f2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/7481f599 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 28 11:08:40 crc restorecon[4637]: 
/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/fdafea19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/d0e1c571 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/ee398915 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/682bb6b8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a3e67855 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a989f289 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/915431bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/7796fdab not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/dcdb5f19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 28 11:08:40 crc restorecon[4637]: 
/var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/a3aaa88c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/5508e3e6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/160585de not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/e99f8da3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/8bc85570 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/a5861c91 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/84db1135 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/9e1a6043 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/c1aba1c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/d55ccd6d not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/971cc9f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/8f2e3dcf not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/ceb35e9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/1c192745 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/5209e501 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/f83de4df not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/e7b978ac not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 28 11:08:40 crc 
restorecon[4637]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/c64304a1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/5384386b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/cce3e3ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/8fb75465 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/740f573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/32fd1134 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/0a861bd3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/80363026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/bfa952a8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c129,c158 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..2025_02_23_05_33_31.333075221 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/793bf43d not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/7db1bb6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/4f6a0368 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/c12c7d86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/36c4a773 not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/4c1e98ae not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/a4c8115c not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/setup/7db1802e not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver/a008a7ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-syncer/2c836bac not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-regeneration-controller/0ce62299 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c97,c980 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-insecure-readyz/945d2457 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-check-endpoints/7d5c1dd8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/index.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/bundle-v1.15.0.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/channel.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/package.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc 
restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/bc8d0691 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/6b76097a not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/34d1af30 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/312ba61c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc 
restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/645d5dd1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/16e825f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/4cf51fc9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/2a23d348 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/075dbd49 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 28 11:08:40 
crc restorecon[4637]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/dd585ddd not reset as customized by admin to system_u:object_r:container_file_t:s0:c377,c642 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/17ebd0ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c343 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/005579f4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_23_11.1287037894 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 28 
11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/bf5f3b9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/af276eb7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/ea28e322 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/692e6683 not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/871746a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/4eb2e958 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 28 11:08:40 crc restorecon[4637]: 
/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/etc-hosts not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/ca9b62da not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/0edd6fce not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/containers/controller-manager/89b4555f not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 28 11:08:40 crc restorecon[4637]: 
/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/655fcd71 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/0d43c002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/e68efd17 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/9acf9b65 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/5ae3ff11 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/1e59206a not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/27af16d1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c304,c1017 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/7918e729 not reset as customized by admin to system_u:object_r:container_file_t:s0:c853,c893 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/5d976d0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c585,c981 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 28 11:08:40 crc restorecon[4637]: 
/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/d7f55cbb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/f0812073 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/1a56cbeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/7fdd437e not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/cdfb5652 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 11:08:40 crc restorecon[4637]: 
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/fix-audit-permissions/fb93119e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver/f1e8fc0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver-check-endpoints/218511f3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server/serving-certs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/ca8af7b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/72cc8a75 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/6e8a3760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4c3455c0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/2278acb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4b453e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/3ec09bda not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2/cacerts.bin not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java/cacerts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl/ca-bundle.trust.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/email-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/objsign-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2ae6433e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fde84897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75680d2e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/openshift-service-serving-signer_1740288168.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/facfc4fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f5a969c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CFCA_EV_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9ef4a08a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ingress-operator_1740288202.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2f332aed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/248c8271.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d10a21f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ACCVRAIZ1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a94d09e5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c9a4d3b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40193066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd8c0d63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b936d1c6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CA_Disig_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4fd49c6c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM_SERVIDORES_SEGUROS.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b81b93f0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f9a69fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b30d5fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ANF_Secure_Server_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b433981b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93851c9e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9282e51c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7dd1bc4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Actalis_Authentication_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/930ac5d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f47b495.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e113c810.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5931b5bc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Commercial.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2b349938.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e48193cf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/302904dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a716d4ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Networking.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93bc0acc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/86212b19.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b727005e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbc54cab.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f51bb24c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c28a8a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9c8dfbd4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ccc52f49.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cb1c3204.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ce5e74ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd08c599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6d41d539.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb5fa911.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e35234b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8cb5ee0f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a7c655d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f8fc53da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/de6d66f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d41b5e2a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/41a3f684.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1df5a75f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_2011.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e36a6752.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b872f2b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9576d26b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/228f89db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_ECC_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb717492.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d21b73c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b1b94ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/595e996b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_RSA_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b46e03d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/128f4b91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_3_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81f2d2b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Autoridad_de_Certificacion_Firmaprofesional_CIF_A62634068.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3bde41ac.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d16a5865.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_EC-384_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0179095f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ffa7f1eb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9482e63a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4dae3dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e359ba6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7e067d03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/95aff9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7746a63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Baltimore_CyberTrust_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/653b494a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3ad48a91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_2_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/54657681.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/82223c44.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8de2f56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d9dafe4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d96b65e2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee64a828.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40547a79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5a3f0ff8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a780d93.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/34d996fb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/eed8c118.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/89c02a45.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b1159c4c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d6325660.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4c339cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8312c4c1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_E1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8508e720.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5fdd185d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48bec511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/69105f4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b9bc432.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/32888f65.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b03dec0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/219d9499.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5acf816d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbf06781.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc99f41e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AAA_Certificate_Services.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/985c1f52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8794b4e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_BR_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7c037b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ef954a4e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_EV_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2add47b6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/90c5a3c8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0f3e76e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/53a1b57a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_EV_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5ad8a5d6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/68dd7389.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d04f354.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d6437c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/062cdee6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bd43e1dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7f3d5d1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c491639e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3513523f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/399e7759.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/feffd413.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d18e9066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/607986c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c90bc37d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1b0f7e5c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e08bfd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dd8e9d41.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed39abd0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a3418fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bc3f2570.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_High_Assurance_EV_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/244b5494.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81b9768f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4be590e0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_ECC_P384_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9846683b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/252252d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e8e7201.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_RSA4096_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d52c538d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c44cc0c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Trusted_Root_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75d1b2ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a2c66da8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ecccd8db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust.net_Certification_Authority__2048_.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/aee5f10d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e7271e8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0e59380.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4c3982f2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b99d060.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf64f35b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0a775a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/002c0b4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cc450945.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_EC1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/106f3e4d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b3fb433b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4042bcee.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/02265526.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/455f1b52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0d69c7e1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9f727ac7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5e98733a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0cd152c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc4d6a89.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6187b673.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/FIRMAPROFESIONAL_CA_ROOT-A_WEB.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ba8887ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/068570d1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f081611a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48a195d8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GDCA_TrustAUTH_R5_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f6fa695.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab59055e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b92fd57f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GLOBALTRUST_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fa5da96b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ec40989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7719f463.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1001acf7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f013ecaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/626dceaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c559d742.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1d3472b9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9479c8c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a81e292b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4bfab552.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e071171e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/57bcb2da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_ECC_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab5346f4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5046c355.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_RSA_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/865fbdf9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da0cfd1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/85cde254.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_ECC_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbb3f32b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureSign_RootCA11.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5860aaa6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/31188b5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HiPKI_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c7f1359b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f15c80c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hongkong_Post_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/09789157.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/18856ac4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e09d511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Commercial_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cf701eeb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d06393bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Public_Sector_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/10531352.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Izenpe.com.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureTrust_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0ed035a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsec_e-Szigno_Root_CA_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8160b96c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8651083.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2c63f966.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_ECC_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d89cda1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/01419da9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_RSA_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7a5b843.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_RSA_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf53fb88.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9591a472.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3afde786.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Gold_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NAVER_Global_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3fb36b73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d39b0a2c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a89d74c2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd58d51e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7db1890.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NetLock_Arany__Class_Gold__F__tan__s__tv__ny.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/988a38cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/60afe812.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f39fc864.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5443e9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GB_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e73d606e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dfc0fe80.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b66938e9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e1eab7c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GC_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/773e07ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c899c73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d59297b8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ddcda989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_1_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/749e9e03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/52b525c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7e8dc79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a819ef2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/08063a00.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b483515.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/064e0aa9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1f58a078.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6f7454b3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7fa05551.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76faf6c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9339512a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f387163d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee37c333.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e18bfb83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e442e424.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fe8a2cd8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/23f4c490.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5cd81ad7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 11:08:40 crc restorecon[4637]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0c70a8d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7892ad52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SZAFIR_ROOT_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4f316efb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_RSA_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/06dc52d5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/583d0756.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0bf05006.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/88950faa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9046744a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c860d51.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_RSA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6fa5da56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/33ee480d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Secure_Global_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/63a2c897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_ECC_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bdacca6f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ff34af3f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbff3a01.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_ECC_RootCA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_C1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/406c9bb1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_C3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Services_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Silver_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/99e1b953.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/14bc7599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TUBITAK_Kamu_SM_SSL_Kok_Sertifikasi_-_Surum_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a3adc42.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f459871d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_ECC_Root_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_RSA_Root_2023.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TeliaSonera_Root_CA_v1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telia_Root_CA_v2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f103249.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f058632f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-certificates.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9bf03295.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/98aaf404.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1cef98f5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/073bfcc5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2923b3f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f249de83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/edcbddb5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P256_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b5697b0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ae85e5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b74d2bd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P384_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d887a5bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9aef356c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TunTrust_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd64f3fc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e13665f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Extended_Validation_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f5dc4f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da7377f6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Global_G2_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c01eb047.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/304d27c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed858448.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f30dd6ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/04f60c28.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_ECC_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fc5a8f99.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/35105088.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee532fd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/XRamp_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/706f604c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76579174.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d86cdd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/882de061.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f618aec.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a9d40e02.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e-Szigno_Root_CA_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e868b802.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/83e9984f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ePKI_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca6e4ad9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d6523ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4b718d9b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/869fbf79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/containers/registry/f8d22bdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/6e8bbfac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/54dd7996 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/a4f1bb05 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/207129da not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/c1df39e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/15b8f1cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/77bd6913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/2382c1b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/704ce128 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/70d16fe0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/bfb95535 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/57a8e8e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/1b9d3e5e not reset as customized by admin to system_u:object_r:container_file_t:s0:c107,c917
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/fddb173c not reset as customized by admin to system_u:object_r:container_file_t:s0:c202,c983
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/95d3c6c4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/bfb5fff5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/2aef40aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/c0391cad not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/1119e69d not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/660608b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/8220bd53 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/85f99d5c not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/4b0225f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/9c2a3394 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/e820b243 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/1ca52ea0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/e6988e45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/6655f00b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/98bc3986 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/08e3458a not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/2a191cb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/6c4eeefb not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/f61a549c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/24891863 not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/fbdfd89c not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/9b63b3bc not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/8acde6d6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/node-driver-registrar/59ecbba3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/csi-provisioner/685d4be3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/containers/route-controller-manager/feaea55e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]:
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/63709497 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/d966b7fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/f5773757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/81c9edb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/57bf57ee not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/86f5e6aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/0aabe31d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/d2af85c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/09d157d9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator/catalog.json not reset as
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 
11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 11:08:40 crc restorecon[4637]: 
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c0fe7256 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c30319e4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/e6b1dd45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/2bb643f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/920de426 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/70fa1e87 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/a1c12a2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/9442e6c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/5b45ec72 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/3c9f3a59 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/1091c11b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/9a6821c6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/ec0c35e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/517f37e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/6214fe78 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/ba189c8b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/351e4f31 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/c0f219ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/8069f607 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/559c3d82 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/605ad488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/148df488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/3bf6dcb4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/022a2feb not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/938c3924 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/729fe23e not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/1fd5cbd4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/a96697e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/e155ddca not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/10dd0e0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/6f2c8392 not reset as customized by admin to system_u:object_r:container_file_t:s0:c267,c588
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/bd241ad9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/plugins/csi-hostpath not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/plugins/csi-hostpath/csi.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/plugins/kubernetes.io not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/plugins/kubernetes.io/csi not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983 not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/vol_data.json not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 28 11:08:40 crc restorecon[4637]: /var/lib/kubelet/plugins_registry not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 28 11:08:40 crc restorecon[4637]: Relabeled /var/usrlocal/bin/kubenswrapper from system_u:object_r:bin_t:s0 to system_u:object_r:kubelet_exec_t:s0
Nov 28 11:08:40 crc kubenswrapper[4923]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Nov 28 11:08:40 crc kubenswrapper[4923]: Flag --minimum-container-ttl-duration has been deprecated, Use --eviction-hard or --eviction-soft instead. Will be removed in a future version.
Nov 28 11:08:40 crc kubenswrapper[4923]: Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Nov 28 11:08:40 crc kubenswrapper[4923]: Flag --register-with-taints has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Nov 28 11:08:40 crc kubenswrapper[4923]: Flag --pod-infra-container-image has been deprecated, will be removed in a future release. Image garbage collector will get sandbox image information from CRI. Nov 28 11:08:40 crc kubenswrapper[4923]: Flag --system-reserved has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.980610 4923 server.go:211] "--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime" Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.986034 4923 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.986065 4923 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.986076 4923 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.986086 4923 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.986094 4923 feature_gate.go:330] unrecognized feature gate: SignatureStores Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.986104 4923 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.986112 4923 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.986120 4923 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.986128 4923 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.986139 4923 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.986150 4923 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.986158 4923 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.986167 4923 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.986175 4923 feature_gate.go:330] unrecognized feature gate: NewOLM Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.986183 4923 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.986193 4923 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. 
Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.986203 4923 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.986212 4923 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.986220 4923 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.986229 4923 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.986237 4923 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.986245 4923 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.986253 4923 feature_gate.go:330] unrecognized feature gate: InsightsConfig Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.986261 4923 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.986269 4923 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.986277 4923 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.986284 4923 feature_gate.go:330] unrecognized feature gate: PinnedImages Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.986292 4923 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.986300 4923 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.986308 4923 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.986316 4923 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.986330 4923 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.986338 4923 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.986346 4923 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.986354 4923 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.986363 4923 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.986372 4923 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.986380 4923 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.986389 4923 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.986397 4923 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.986405 4923 feature_gate.go:330] unrecognized feature gate: PlatformOperators Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.986413 4923 
feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.986420 4923 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.986428 4923 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.986436 4923 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.986444 4923 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.986453 4923 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.986462 4923 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.986470 4923 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.986478 4923 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.986485 4923 feature_gate.go:330] unrecognized feature gate: OVNObservability Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.986493 4923 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.986500 4923 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.986508 4923 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.986517 4923 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.986525 4923 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.986533 4923 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.986540 4923 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.986549 4923 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.986556 4923 feature_gate.go:330] unrecognized feature gate: Example Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.986564 4923 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.986572 4923 feature_gate.go:330] unrecognized feature gate: GatewayAPI Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.986579 4923 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.986587 4923 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.986599 4923 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. 
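Alongside the unrecognized-gate warnings, three other notice classes show up: GA gates (feature_gate.go:353) are already locked to their defaults, so setting them is redundant and slated to become an error; deprecated gates (feature_gate.go:351, KMSv1 here) still apply but will be removed. A toy model of that dispatch, not the real component-base implementation:

package main

import "fmt"

// gateSpec is a toy stand-in for a feature-gate registry entry.
type gateSpec struct {
	def        bool
	ga         bool
	deprecated bool
}

var known = map[string]gateSpec{
	"DisableKubeletCloudCredentialProviders": {def: true, ga: true},
	"KMSv1":                                  {def: false, deprecated: true},
}

// set mimics the three warning classes seen in this log.
func set(gates map[string]bool, name string, val bool) {
	spec, ok := known[name]
	switch {
	case !ok:
		fmt.Printf("W unrecognized feature gate: %s\n", name) // cf. feature_gate.go:330
		return // unknown gates are ignored
	case spec.ga:
		fmt.Printf("W Setting GA feature gate %s=%v. It will be removed in a future release.\n", name, val) // cf. :353
	case spec.deprecated:
		fmt.Printf("W Setting deprecated feature gate %s=%v. It will be removed in a future release.\n", name, val) // cf. :351
	}
	gates[name] = val
}

func main() {
	gates := map[string]bool{}
	set(gates, "AutomatedEtcdBackup", true) // OpenShift-level gate, unknown here
	set(gates, "DisableKubeletCloudCredentialProviders", true)
	set(gates, "KMSv1", true)
	fmt.Println("feature gates:", gates)
}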
Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.986609 4923 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.986617 4923 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.986626 4923 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.986634 4923 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.986642 4923 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.986652 4923 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987030 4923 flags.go:64] FLAG: --address="0.0.0.0" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987050 4923 flags.go:64] FLAG: --allowed-unsafe-sysctls="[]" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987063 4923 flags.go:64] FLAG: --anonymous-auth="true" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987075 4923 flags.go:64] FLAG: --application-metrics-count-limit="100" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987087 4923 flags.go:64] FLAG: --authentication-token-webhook="false" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987096 4923 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="2m0s" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987107 4923 flags.go:64] FLAG: --authorization-mode="AlwaysAllow" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987118 4923 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="5m0s" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987127 4923 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="30s" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987137 4923 flags.go:64] FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987147 4923 flags.go:64] FLAG: --bootstrap-kubeconfig="/etc/kubernetes/kubeconfig" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987156 4923 flags.go:64] FLAG: --cert-dir="/var/lib/kubelet/pki" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987168 4923 flags.go:64] FLAG: --cgroup-driver="cgroupfs" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987177 4923 flags.go:64] FLAG: --cgroup-root="" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987186 4923 flags.go:64] FLAG: --cgroups-per-qos="true" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987196 4923 flags.go:64] FLAG: --client-ca-file="" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987205 4923 flags.go:64] FLAG: --cloud-config="" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987214 4923 flags.go:64] FLAG: --cloud-provider="" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987222 4923 flags.go:64] FLAG: --cluster-dns="[]" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987234 4923 flags.go:64] FLAG: --cluster-domain="" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987243 4923 flags.go:64] FLAG: --config="/etc/kubernetes/kubelet.conf" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987252 4923 flags.go:64] FLAG: --config-dir="" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987260 4923 flags.go:64] FLAG: 
--container-hints="/etc/cadvisor/container_hints.json" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987270 4923 flags.go:64] FLAG: --container-log-max-files="5" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987282 4923 flags.go:64] FLAG: --container-log-max-size="10Mi" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987291 4923 flags.go:64] FLAG: --container-runtime-endpoint="/var/run/crio/crio.sock" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987300 4923 flags.go:64] FLAG: --containerd="/run/containerd/containerd.sock" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987310 4923 flags.go:64] FLAG: --containerd-namespace="k8s.io" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987320 4923 flags.go:64] FLAG: --contention-profiling="false" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987329 4923 flags.go:64] FLAG: --cpu-cfs-quota="true" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987338 4923 flags.go:64] FLAG: --cpu-cfs-quota-period="100ms" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987348 4923 flags.go:64] FLAG: --cpu-manager-policy="none" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987356 4923 flags.go:64] FLAG: --cpu-manager-policy-options="" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987367 4923 flags.go:64] FLAG: --cpu-manager-reconcile-period="10s" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987377 4923 flags.go:64] FLAG: --enable-controller-attach-detach="true" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987385 4923 flags.go:64] FLAG: --enable-debugging-handlers="true" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987395 4923 flags.go:64] FLAG: --enable-load-reader="false" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987404 4923 flags.go:64] FLAG: --enable-server="true" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987412 4923 flags.go:64] FLAG: --enforce-node-allocatable="[pods]" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987423 4923 flags.go:64] FLAG: --event-burst="100" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987432 4923 flags.go:64] FLAG: --event-qps="50" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987442 4923 flags.go:64] FLAG: --event-storage-age-limit="default=0" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987451 4923 flags.go:64] FLAG: --event-storage-event-limit="default=0" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987460 4923 flags.go:64] FLAG: --eviction-hard="" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987470 4923 flags.go:64] FLAG: --eviction-max-pod-grace-period="0" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987479 4923 flags.go:64] FLAG: --eviction-minimum-reclaim="" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987488 4923 flags.go:64] FLAG: --eviction-pressure-transition-period="5m0s" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987499 4923 flags.go:64] FLAG: --eviction-soft="" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987509 4923 flags.go:64] FLAG: --eviction-soft-grace-period="" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987518 4923 flags.go:64] FLAG: --exit-on-lock-contention="false" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987526 4923 flags.go:64] FLAG: --experimental-allocatable-ignore-eviction="false" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987536 4923 flags.go:64] FLAG: --experimental-mounter-path="" Nov 28 11:08:40 crc 
kubenswrapper[4923]: I1128 11:08:40.987544 4923 flags.go:64] FLAG: --fail-cgroupv1="false" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987553 4923 flags.go:64] FLAG: --fail-swap-on="true" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987562 4923 flags.go:64] FLAG: --feature-gates="" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987573 4923 flags.go:64] FLAG: --file-check-frequency="20s" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987583 4923 flags.go:64] FLAG: --global-housekeeping-interval="1m0s" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987592 4923 flags.go:64] FLAG: --hairpin-mode="promiscuous-bridge" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987601 4923 flags.go:64] FLAG: --healthz-bind-address="127.0.0.1" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987611 4923 flags.go:64] FLAG: --healthz-port="10248" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987619 4923 flags.go:64] FLAG: --help="false" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987628 4923 flags.go:64] FLAG: --hostname-override="" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987637 4923 flags.go:64] FLAG: --housekeeping-interval="10s" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987647 4923 flags.go:64] FLAG: --http-check-frequency="20s" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987657 4923 flags.go:64] FLAG: --image-credential-provider-bin-dir="" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987665 4923 flags.go:64] FLAG: --image-credential-provider-config="" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987674 4923 flags.go:64] FLAG: --image-gc-high-threshold="85" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987685 4923 flags.go:64] FLAG: --image-gc-low-threshold="80" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987693 4923 flags.go:64] FLAG: --image-service-endpoint="" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987702 4923 flags.go:64] FLAG: --kernel-memcg-notification="false" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987711 4923 flags.go:64] FLAG: --kube-api-burst="100" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987720 4923 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987729 4923 flags.go:64] FLAG: --kube-api-qps="50" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987738 4923 flags.go:64] FLAG: --kube-reserved="" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987747 4923 flags.go:64] FLAG: --kube-reserved-cgroup="" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987756 4923 flags.go:64] FLAG: --kubeconfig="/var/lib/kubelet/kubeconfig" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987765 4923 flags.go:64] FLAG: --kubelet-cgroups="" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987774 4923 flags.go:64] FLAG: --local-storage-capacity-isolation="true" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987783 4923 flags.go:64] FLAG: --lock-file="" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987793 4923 flags.go:64] FLAG: --log-cadvisor-usage="false" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987803 4923 flags.go:64] FLAG: --log-flush-frequency="5s" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987812 4923 flags.go:64] FLAG: --log-json-info-buffer-size="0" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987825 4923 flags.go:64] 
FLAG: --log-json-split-stream="false" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987834 4923 flags.go:64] FLAG: --log-text-info-buffer-size="0" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987844 4923 flags.go:64] FLAG: --log-text-split-stream="false" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987853 4923 flags.go:64] FLAG: --logging-format="text" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987862 4923 flags.go:64] FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987872 4923 flags.go:64] FLAG: --make-iptables-util-chains="true" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987881 4923 flags.go:64] FLAG: --manifest-url="" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987890 4923 flags.go:64] FLAG: --manifest-url-header="" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987907 4923 flags.go:64] FLAG: --max-housekeeping-interval="15s" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987917 4923 flags.go:64] FLAG: --max-open-files="1000000" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987928 4923 flags.go:64] FLAG: --max-pods="110" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987964 4923 flags.go:64] FLAG: --maximum-dead-containers="-1" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987973 4923 flags.go:64] FLAG: --maximum-dead-containers-per-container="1" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987982 4923 flags.go:64] FLAG: --memory-manager-policy="None" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.987991 4923 flags.go:64] FLAG: --minimum-container-ttl-duration="6m0s" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.988000 4923 flags.go:64] FLAG: --minimum-image-ttl-duration="2m0s" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.988010 4923 flags.go:64] FLAG: --node-ip="192.168.126.11" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.988019 4923 flags.go:64] FLAG: --node-labels="node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.openshift.io/os_id=rhcos" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.988038 4923 flags.go:64] FLAG: --node-status-max-images="50" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.988047 4923 flags.go:64] FLAG: --node-status-update-frequency="10s" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.988057 4923 flags.go:64] FLAG: --oom-score-adj="-999" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.988066 4923 flags.go:64] FLAG: --pod-cidr="" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.988075 4923 flags.go:64] FLAG: --pod-infra-container-image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:33549946e22a9ffa738fd94b1345f90921bc8f92fa6137784cb33c77ad806f9d" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.988087 4923 flags.go:64] FLAG: --pod-manifest-path="" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.988096 4923 flags.go:64] FLAG: --pod-max-pids="-1" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.988106 4923 flags.go:64] FLAG: --pods-per-core="0" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.988116 4923 flags.go:64] FLAG: --port="10250" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.988125 4923 flags.go:64] FLAG: --protect-kernel-defaults="false" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.988134 4923 flags.go:64] FLAG: --provider-id="" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.988143 4923 
flags.go:64] FLAG: --qos-reserved="" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.988152 4923 flags.go:64] FLAG: --read-only-port="10255" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.988161 4923 flags.go:64] FLAG: --register-node="true" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.988170 4923 flags.go:64] FLAG: --register-schedulable="true" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.988179 4923 flags.go:64] FLAG: --register-with-taints="node-role.kubernetes.io/master=:NoSchedule" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.988193 4923 flags.go:64] FLAG: --registry-burst="10" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.988202 4923 flags.go:64] FLAG: --registry-qps="5" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.988211 4923 flags.go:64] FLAG: --reserved-cpus="" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.988220 4923 flags.go:64] FLAG: --reserved-memory="" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.988232 4923 flags.go:64] FLAG: --resolv-conf="/etc/resolv.conf" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.988241 4923 flags.go:64] FLAG: --root-dir="/var/lib/kubelet" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.988250 4923 flags.go:64] FLAG: --rotate-certificates="false" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.988259 4923 flags.go:64] FLAG: --rotate-server-certificates="false" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.988268 4923 flags.go:64] FLAG: --runonce="false" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.988277 4923 flags.go:64] FLAG: --runtime-cgroups="/system.slice/crio.service" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.988286 4923 flags.go:64] FLAG: --runtime-request-timeout="2m0s" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.988296 4923 flags.go:64] FLAG: --seccomp-default="false" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.988304 4923 flags.go:64] FLAG: --serialize-image-pulls="true" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.988314 4923 flags.go:64] FLAG: --storage-driver-buffer-duration="1m0s" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.988323 4923 flags.go:64] FLAG: --storage-driver-db="cadvisor" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.988332 4923 flags.go:64] FLAG: --storage-driver-host="localhost:8086" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.988342 4923 flags.go:64] FLAG: --storage-driver-password="root" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.988351 4923 flags.go:64] FLAG: --storage-driver-secure="false" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.988360 4923 flags.go:64] FLAG: --storage-driver-table="stats" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.988369 4923 flags.go:64] FLAG: --storage-driver-user="root" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.988378 4923 flags.go:64] FLAG: --streaming-connection-idle-timeout="4h0m0s" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.988387 4923 flags.go:64] FLAG: --sync-frequency="1m0s" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.988396 4923 flags.go:64] FLAG: --system-cgroups="" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.988405 4923 flags.go:64] FLAG: --system-reserved="cpu=200m,ephemeral-storage=350Mi,memory=350Mi" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.988419 4923 flags.go:64] FLAG: --system-reserved-cgroup="" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.988428 
4923 flags.go:64] FLAG: --tls-cert-file="" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.988436 4923 flags.go:64] FLAG: --tls-cipher-suites="[]" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.988448 4923 flags.go:64] FLAG: --tls-min-version="" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.988457 4923 flags.go:64] FLAG: --tls-private-key-file="" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.988466 4923 flags.go:64] FLAG: --topology-manager-policy="none" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.988476 4923 flags.go:64] FLAG: --topology-manager-policy-options="" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.988485 4923 flags.go:64] FLAG: --topology-manager-scope="container" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.988494 4923 flags.go:64] FLAG: --v="2" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.988505 4923 flags.go:64] FLAG: --version="false" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.988516 4923 flags.go:64] FLAG: --vmodule="" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.988526 4923 flags.go:64] FLAG: --volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec" Nov 28 11:08:40 crc kubenswrapper[4923]: I1128 11:08:40.988536 4923 flags.go:64] FLAG: --volume-stats-agg-period="1m0s" Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.988734 4923 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.988746 4923 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.988756 4923 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.988765 4923 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.988773 4923 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.988781 4923 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.988789 4923 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.988797 4923 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.988805 4923 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.988812 4923 feature_gate.go:330] unrecognized feature gate: SignatureStores Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.988820 4923 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.988827 4923 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.988839 4923 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. 
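Every flags.go:64 entry in the dump above records one effective flag as --name="value", which makes the block easy to extract mechanically, for example to diff a node's settings against defaults. A minimal sketch (regex and stdin usage are assumptions for illustration):

package main

import (
	"bufio"
	"fmt"
	"os"
	"regexp"
	"sort"
)

// Collects the flags.go:64 FLAG dump into a name -> value map.
func main() {
	re := regexp.MustCompile(`flags\.go:64\] FLAG: (--[\w-]+)="([^"]*)"`)
	flags := map[string]string{}
	sc := bufio.NewScanner(os.Stdin)
	sc.Buffer(make([]byte, 0, 1024*1024), 1024*1024) // tolerate long log lines
	for sc.Scan() {
		for _, m := range re.FindAllStringSubmatch(sc.Text(), -1) {
			flags[m[1]] = m[2]
		}
	}
	names := make([]string, 0, len(flags))
	for n := range flags {
		names = append(names, n)
	}
	sort.Strings(names)
	for _, n := range names {
		fmt.Printf("%s=%q\n", n, flags[n])
	}
}

Run as: go run dump_flags.go < kubelet.log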
Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.988849 4923 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.988858 4923 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.988867 4923 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.988875 4923 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.988882 4923 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.988891 4923 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.988899 4923 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.988907 4923 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.988914 4923 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.988922 4923 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.988930 4923 feature_gate.go:330] unrecognized feature gate: InsightsConfig Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.988959 4923 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.988967 4923 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.988974 4923 feature_gate.go:330] unrecognized feature gate: PlatformOperators Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.988982 4923 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.988990 4923 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.988998 4923 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.989006 4923 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.989014 4923 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.989022 4923 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.989030 4923 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.989038 4923 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.989045 4923 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.989053 4923 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.989060 4923 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.989068 4923 feature_gate.go:330] 
unrecognized feature gate: ManagedBootImages Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.989076 4923 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.989084 4923 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.989092 4923 feature_gate.go:330] unrecognized feature gate: GatewayAPI Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.989099 4923 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.989107 4923 feature_gate.go:330] unrecognized feature gate: OVNObservability Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.989115 4923 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.989122 4923 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.989130 4923 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.989138 4923 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.989146 4923 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.989153 4923 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.989164 4923 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.989173 4923 feature_gate.go:330] unrecognized feature gate: NewOLM Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.989183 4923 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.989191 4923 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.989199 4923 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.989208 4923 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.989216 4923 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.989224 4923 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.989233 4923 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.989241 4923 feature_gate.go:330] unrecognized feature gate: Example Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.989250 4923 feature_gate.go:330] unrecognized feature gate: PinnedImages Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.989258 4923 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.989266 4923 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.989274 4923 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Nov 28 11:08:40 crc 
kubenswrapper[4923]: W1128 11:08:40.989282 4923 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.989290 4923 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.989298 4923 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Nov 28 11:08:40 crc kubenswrapper[4923]: W1128 11:08:40.989305 4923 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.989313 4923 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.989321 4923 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.989331 4923 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:40.989355 4923 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:40.999000 4923 server.go:491] "Kubelet version" kubeletVersion="v1.31.5" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:40.999035 4923 server.go:493] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999139 4923 feature_gate.go:330] unrecognized feature gate: PinnedImages Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999150 4923 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999156 4923 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999162 4923 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999169 4923 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999178 4923 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. 
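After each pass the kubelet logs the effective result as feature gates: {map[Name:bool ...]}; only gates its registry recognizes survive into that map, which is why the fifteen entries there are a small subset of everything passed in. Parsing that summary line gives the node's actual gate state; a sketch with the line shortened inline (in practice read it from the log):

package main

import (
	"fmt"
	"regexp"
	"strings"
)

// Parses a "feature gates: {map[Name:bool ...]}" summary into a Go map.
func main() {
	line := `feature gates: {map[CloudDualStackNodeIPs:true KMSv1:true NodeSwap:false ValidatingAdmissionPolicy:true]}`
	re := regexp.MustCompile(`map\[(.*)\]`)
	gates := map[string]bool{}
	if m := re.FindStringSubmatch(line); m != nil {
		for _, kv := range strings.Fields(m[1]) {
			parts := strings.SplitN(kv, ":", 2)
			if len(parts) == 2 {
				gates[parts[0]] = parts[1] == "true"
			}
		}
	}
	fmt.Println(gates)
}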
Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999185 4923 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999191 4923 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999196 4923 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999202 4923 feature_gate.go:330] unrecognized feature gate: OVNObservability Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999206 4923 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999211 4923 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999216 4923 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999222 4923 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999227 4923 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999231 4923 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999236 4923 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999241 4923 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999246 4923 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999252 4923 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. 
Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999258 4923 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999264 4923 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999268 4923 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999273 4923 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999278 4923 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999283 4923 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999288 4923 feature_gate.go:330] unrecognized feature gate: InsightsConfig Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999292 4923 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999297 4923 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999303 4923 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999309 4923 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999315 4923 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999319 4923 feature_gate.go:330] unrecognized feature gate: PlatformOperators Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999325 4923 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999329 4923 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999334 4923 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999339 4923 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999345 4923 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999350 4923 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999354 4923 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999359 4923 feature_gate.go:330] unrecognized feature gate: Example Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999364 4923 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999368 4923 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999373 4923 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999378 4923 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999383 4923 feature_gate.go:330] unrecognized feature gate: 
OpenShiftPodSecurityAdmission Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999387 4923 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999392 4923 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999397 4923 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999402 4923 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999406 4923 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999411 4923 feature_gate.go:330] unrecognized feature gate: GatewayAPI Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999416 4923 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999420 4923 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999425 4923 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999430 4923 feature_gate.go:330] unrecognized feature gate: SignatureStores Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999436 4923 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999442 4923 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999447 4923 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999452 4923 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999458 4923 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999464 4923 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999468 4923 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999474 4923 feature_gate.go:330] unrecognized feature gate: NewOLM Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999479 4923 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999485 4923 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999491 4923 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999496 4923 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999500 4923 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999505 4923 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999510 4923 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 
11:08:40.999519 4923 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999662 4923 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999670 4923 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999675 4923 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999680 4923 feature_gate.go:330] unrecognized feature gate: Example Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999686 4923 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999691 4923 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999696 4923 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999701 4923 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999707 4923 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999712 4923 feature_gate.go:330] unrecognized feature gate: NewOLM Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999717 4923 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999722 4923 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999727 4923 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999733 4923 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. 
Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999740 4923 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999745 4923 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999750 4923 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999755 4923 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999761 4923 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999766 4923 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999771 4923 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999776 4923 feature_gate.go:330] unrecognized feature gate: SignatureStores Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999781 4923 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999788 4923 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999794 4923 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999799 4923 feature_gate.go:330] unrecognized feature gate: OVNObservability Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999804 4923 feature_gate.go:330] unrecognized feature gate: PinnedImages Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999809 4923 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999814 4923 feature_gate.go:330] unrecognized feature gate: InsightsConfig Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999819 4923 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999826 4923 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999832 4923 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999839 4923 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999847 4923 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999855 4923 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999864 4923 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999872 4923 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999880 4923 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999889 4923 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999897 4923 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Nov 28 11:08:41 crc 
kubenswrapper[4923]: W1128 11:08:40.999904 4923 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999912 4923 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999919 4923 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999927 4923 feature_gate.go:330] unrecognized feature gate: GatewayAPI Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999963 4923 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999971 4923 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999978 4923 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999986 4923 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:40.999993 4923 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:41.000002 4923 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:41.000009 4923 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:41.000017 4923 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:41.000024 4923 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:41.000035 4923 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:41.000044 4923 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:41.000053 4923 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:41.000062 4923 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:41.000070 4923 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:41.000078 4923 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:41.000086 4923 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:41.000094 4923 feature_gate.go:330] unrecognized feature gate: PlatformOperators Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:41.000101 4923 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:41.000109 4923 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:41.000116 4923 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:41.000126 4923 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. 
It will be removed in a future release. Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:41.000135 4923 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:41.000146 4923 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:41.000156 4923 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:41.000164 4923 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:41.000172 4923 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:41.000181 4923 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.000192 4923 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.000738 4923 server.go:940] "Client rotation is on, will bootstrap in background" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.005106 4923 bootstrap.go:85] "Current kubeconfig file contents are still valid, no bootstrap necessary" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.005216 4923 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-client-current.pem". 
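Client rotation is on but no bootstrap is needed, since the existing kubeconfig is still valid; the certificate_manager entries that follow then report the cert's expiry, a rotation deadline well before it (the kubelet jitters the deadline within the cert's lifetime, so it varies run to run), and the resulting wait. A sketch reproducing that arithmetic from the timestamps logged below:

package main

import (
	"fmt"
	"time"
)

// Recomputes the certificate_manager figures from this log: the
// "Waiting ..." duration is the chosen rotation deadline minus the log
// timestamp. Times copied from the entries below (fractional seconds
// dropped); the deadline itself is randomized by the kubelet.
func main() {
	const layout = "2006-01-02 15:04:05 -0700 MST"
	expiry, _ := time.Parse(layout, "2026-02-24 05:52:08 +0000 UTC")
	deadline, _ := time.Parse(layout, "2025-12-20 06:59:45 +0000 UTC")
	now, _ := time.Parse(layout, "2025-11-28 11:08:41 +0000 UTC")
	fmt.Println("headroom before expiry:  ", expiry.Sub(deadline))
	fmt.Println("wait until next rotation:", deadline.Sub(now)) // ~523h51m, as logged
}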
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.006057 4923 server.go:997] "Starting client certificate rotation"
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.006095 4923 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate rotation is enabled
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.006632 4923 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-02-24 05:52:08 +0000 UTC, rotation deadline is 2025-12-20 06:59:45.432882115 +0000 UTC
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.006759 4923 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 523h51m4.42612844s for next certificate rotation
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.013240 4923 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.017832 4923 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.032838 4923 log.go:25] "Validated CRI v1 runtime API"
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.057423 4923 log.go:25] "Validated CRI v1 image API"
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.059765 4923 server.go:1437] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd"
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.063000 4923 fs.go:133] Filesystem UUIDs: map[0b076daa-c26a-46d2-b3a6-72a8dbc6e257:/dev/vda4 2025-11-28-11-02-29-00:/dev/sr0 7B77-95E7:/dev/vda2 de0497b0-db1b-465a-b278-03db02455c71:/dev/vda3]
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.063044 4923 fs.go:134] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 blockSize:0} /dev/vda4:{mountpoint:/var major:252 minor:4 fsType:xfs blockSize:0} /run:{mountpoint:/run major:0 minor:24 fsType:tmpfs blockSize:0} /run/user/1000:{mountpoint:/run/user/1000 major:0 minor:42 fsType:tmpfs blockSize:0} /tmp:{mountpoint:/tmp major:0 minor:30 fsType:tmpfs blockSize:0} /var/lib/etcd:{mountpoint:/var/lib/etcd major:0 minor:41 fsType:tmpfs blockSize:0}]
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.081833 4923 manager.go:217] Machine: {Timestamp:2025-11-28 11:08:41.080448237 +0000 UTC m=+0.209132527 CPUVendorID:AuthenticAMD NumCores:8 NumPhysicalCores:1 NumSockets:8 CpuFrequency:2800000 MemoryCapacity:25199480832 SwapCapacity:0 MemoryByType:map[] NVMInfo:{MemoryModeCapacity:0 AppDirectModeCapacity:0 AvgPowerBudget:0} HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] MachineID:21801e6708c44f15b81395eb736a7cec SystemUUID:bb6b4e53-d23a-4517-9d50-b05bdc3da8e4 BootID:f69ffe27-00d5-45aa-bb63-00075a21e0c7 Filesystems:[{Device:/var/lib/etcd DeviceMajor:0 DeviceMinor:41 Capacity:1073741824 Type:vfs Inodes:3076108 HasInodes:true} {Device:/dev/shm DeviceMajor:0 DeviceMinor:22 Capacity:12599738368 Type:vfs Inodes:3076108 HasInodes:true} {Device:/run DeviceMajor:0 DeviceMinor:24 Capacity:5039898624 Type:vfs Inodes:819200 HasInodes:true} {Device:/dev/vda4 DeviceMajor:252 DeviceMinor:4 Capacity:85292941312 Type:vfs Inodes:41679680 HasInodes:true} {Device:/tmp DeviceMajor:0 DeviceMinor:30 Capacity:12599742464 Type:vfs Inodes:1048576 HasInodes:true} {Device:/dev/vda3 DeviceMajor:252 DeviceMinor:3 Capacity:366869504 Type:vfs Inodes:98304 HasInodes:true} {Device:/run/user/1000 DeviceMajor:0 DeviceMinor:42 Capacity:2519945216 Type:vfs Inodes:615221 HasInodes:true}] DiskMap:map[252:0:{Name:vda Major:252 Minor:0 Size:429496729600 Scheduler:none}] NetworkDevices:[{Name:br-ex MacAddress:fa:16:3e:90:76:09 Speed:0 Mtu:1500} {Name:br-int MacAddress:d6:39:55:2e:22:71 Speed:0 Mtu:1400} {Name:ens3 MacAddress:fa:16:3e:90:76:09 Speed:-1 Mtu:1500} {Name:ens7 MacAddress:fa:16:3e:d0:a3:3b Speed:-1 Mtu:1500} {Name:ens7.20 MacAddress:52:54:00:2f:a5:6e Speed:-1 Mtu:1496} {Name:ens7.21 MacAddress:52:54:00:42:a5:95 Speed:-1 Mtu:1496} {Name:ens7.22 MacAddress:52:54:00:b5:65:b4 Speed:-1 Mtu:1496} {Name:ens7.23 MacAddress:52:54:00:df:ae:6c Speed:-1 Mtu:1496} {Name:eth10 MacAddress:06:b3:be:34:61:6b Speed:0 Mtu:1500} {Name:ovn-k8s-mp0 MacAddress:0a:58:0a:d9:00:02 Speed:0 Mtu:1400} {Name:ovs-system MacAddress:fa:f8:9c:83:65:8a Speed:0 Mtu:1500}] Topology:[{Id:0 Memory:25199480832 HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] Cores:[{Id:0 Threads:[0] Caches:[{Id:0 Size:32768 Type:Data Level:1} {Id:0 Size:32768 Type:Instruction Level:1} {Id:0 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:0 Size:16777216 Type:Unified Level:3}] SocketID:0 BookID: DrawerID:} {Id:0 Threads:[1] Caches:[{Id:1 Size:32768 Type:Data Level:1} {Id:1 Size:32768 Type:Instruction Level:1} {Id:1 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:1 Size:16777216 Type:Unified Level:3}] SocketID:1 BookID: DrawerID:} {Id:0 Threads:[2] Caches:[{Id:2 Size:32768 Type:Data Level:1} {Id:2 Size:32768 Type:Instruction Level:1} {Id:2 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:2 Size:16777216 Type:Unified Level:3}] SocketID:2 BookID: DrawerID:} {Id:0 Threads:[3] Caches:[{Id:3 Size:32768 Type:Data Level:1} {Id:3 Size:32768 Type:Instruction Level:1} {Id:3 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:3 Size:16777216 Type:Unified Level:3}] SocketID:3 BookID: DrawerID:} {Id:0 Threads:[4] Caches:[{Id:4 Size:32768 Type:Data Level:1} {Id:4 Size:32768 Type:Instruction Level:1} {Id:4 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:4 Size:16777216 Type:Unified Level:3}] SocketID:4 BookID: DrawerID:} {Id:0 Threads:[5] Caches:[{Id:5 Size:32768 Type:Data Level:1} {Id:5 Size:32768 Type:Instruction Level:1} {Id:5 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:5 Size:16777216 Type:Unified Level:3}] SocketID:5 BookID: DrawerID:} {Id:0 Threads:[6] Caches:[{Id:6 Size:32768 Type:Data Level:1} {Id:6 Size:32768 Type:Instruction Level:1} {Id:6 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:6 Size:16777216 Type:Unified Level:3}] SocketID:6 BookID: DrawerID:} {Id:0 Threads:[7] Caches:[{Id:7 Size:32768 Type:Data Level:1} {Id:7 Size:32768 Type:Instruction Level:1} {Id:7 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:7 Size:16777216 Type:Unified Level:3}] SocketID:7 BookID: DrawerID:}] Caches:[] Distances:[10]}] CloudProvider:Unknown InstanceType:Unknown InstanceID:None}
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.082190 4923 manager_no_libpfm.go:29] cAdvisor is build without cgo and/or libpfm support. Perf event counters are not available.
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.082352 4923 manager.go:233] Version: {KernelVersion:5.14.0-427.50.2.el9_4.x86_64 ContainerOsVersion:Red Hat Enterprise Linux CoreOS 418.94.202502100215-0 DockerVersion: DockerAPIVersion: CadvisorVersion: CadvisorRevision:}
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.083047 4923 swap_util.go:113] "Swap is on" /proc/swaps contents="Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority"
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.083334 4923 container_manager_linux.go:267] "Container manager verified user specified cgroup-root exists" cgroupRoot=[]
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.083387 4923 container_manager_linux.go:272] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"crc","RuntimeCgroupsName":"/system.slice/crio.service","SystemCgroupsName":"/system.slice","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":true,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":{"cpu":"200m","ephemeral-storage":"350Mi","memory":"350Mi"},"HardEvictionThresholds":[{"Signal":"memory.available","Operator":"LessThan","Value":{"Quantity":"100Mi","Percentage":0},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.1},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.15},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null}],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"ExperimentalMemoryManagerPolicy":"None","ExperimentalMemoryManagerReservedMemory":null,"PodPidsLimit":4096,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2}
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.083712 4923 topology_manager.go:138] "Creating topology manager with none policy"
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.083731 4923 container_manager_linux.go:303] "Creating device plugin manager"
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.084101 4923 manager.go:142] "Creating Device Plugin manager" path="/var/lib/kubelet/device-plugins/kubelet.sock"
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.084151 4923 server.go:66] "Creating device plugin registration server" version="v1beta1" socket="/var/lib/kubelet/device-plugins/kubelet.sock"
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.084402 4923 state_mem.go:36] "Initialized new in-memory state store"
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.084542 4923 server.go:1245] "Using root directory" path="/var/lib/kubelet"
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.085730 4923 kubelet.go:418] "Attempting to sync node with API server"
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.085765 4923 kubelet.go:313] "Adding static pod path" path="/etc/kubernetes/manifests"
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.085811 4923 file.go:69] "Watching path" path="/etc/kubernetes/manifests"
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.085835 4923 kubelet.go:324] "Adding apiserver pod source"
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.085851 4923 apiserver.go:42] "Waiting for node sync before watching apiserver pods"
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.087869 4923 kuberuntime_manager.go:262] "Container runtime initialized" containerRuntime="cri-o" version="1.31.5-4.rhaos4.18.gitdad78d5.el9" apiVersion="v1"
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.088493 4923 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-server-current.pem".
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.090054 4923 kubelet.go:854] "Not starting ClusterTrustBundle informer because we are in static kubelet mode"
Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:41.090309 4923 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.196:6443: connect: connection refused
Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:41.090342 4923 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.196:6443: connect: connection refused
Nov 28 11:08:41 crc kubenswrapper[4923]: E1128 11:08:41.090420 4923 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.196:6443: connect: connection refused" logger="UnhandledError"
Nov 28 11:08:41 crc kubenswrapper[4923]: E1128 11:08:41.090473 4923 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.196:6443: connect: connection refused" logger="UnhandledError"
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.090994 4923 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume"
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.091039 4923 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/empty-dir"
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.091055 4923 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/git-repo"
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.091070 4923 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/host-path"
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.091091 4923 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/nfs"
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.091105 4923 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/secret"
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.091118 4923 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/iscsi"
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.091141 4923 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/downward-api"
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.091157 4923 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/fc"
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.091171 4923 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/configmap"
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.091191 4923 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/projected"
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.091204 4923 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/local-volume"
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.091678 4923 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/csi"
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.092324 4923 server.go:1280] "Started kubelet"
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.093664 4923 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.196:6443: connect: connection refused
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.094046 4923 server.go:163] "Starting to listen" address="0.0.0.0" port=10250
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.094846 4923 ratelimit.go:55] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10
Nov 28 11:08:41 crc systemd[1]: Started Kubernetes Kubelet.
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.097230 4923 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate rotation is enabled
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.097274 4923 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer"
Nov 28 11:08:41 crc kubenswrapper[4923]: E1128 11:08:41.097807 4923 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found"
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.098281 4923 volume_manager.go:287] "The desired_state_of_world populator starts"
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.098323 4923 volume_manager.go:289] "Starting Kubelet Volume Manager"
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.098519 4923 desired_state_of_world_populator.go:146] "Desired state populator starts to run"
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.098673 4923 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-25 06:01:43.454149248 +0000 UTC
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.099603 4923 server.go:236] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock"
Nov 28 11:08:41 crc kubenswrapper[4923]: E1128 11:08:41.100478 4923 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.196:6443: connect: connection refused" interval="200ms"
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.101145 4923 factory.go:55] Registering systemd factory
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.101351 4923 factory.go:221] Registration of the systemd container factory successfully
Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:41.103582 4923 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.196:6443: connect: connection refused
Nov 28 11:08:41 crc kubenswrapper[4923]: E1128 11:08:41.104538 4923 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.196:6443: connect: connection refused" logger="UnhandledError"
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.105428 4923 server.go:460] "Adding debug handlers to kubelet server"
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.106689 4923 factory.go:153] Registering CRI-O factory
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.106813 4923 factory.go:221] Registration of the crio container factory successfully
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.106979 4923 factory.go:219] Registration of the containerd container factory failed: unable to create containerd client: containerd: cannot unix dial containerd api service: dial unix /run/containerd/containerd.sock: connect: no such file or directory
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.107072 4923 factory.go:103] Registering Raw factory
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.107150 4923 manager.go:1196] Started watching for new ooms in manager
Nov 28 11:08:41 crc kubenswrapper[4923]: E1128 11:08:41.106190 4923 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.196:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.187c27140cf039c8 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-28 11:08:41.092291016 +0000 UTC m=+0.220975266,LastTimestamp:2025-11-28 11:08:41.092291016 +0000 UTC m=+0.220975266,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.109359 4923 manager.go:319] Starting recovery of all containers
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.116605 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides" seLinuxMountContext=""
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.116670 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" seLinuxMountContext=""
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.116693 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" seLinuxMountContext=""
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.116713 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" volumeName="kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" seLinuxMountContext=""
into the actual state" pod="" podName="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" volumeName="kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.116731 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.116748 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.116768 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.116786 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.116806 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.116822 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.116841 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.116860 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.116878 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.116897 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.116914 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the 
actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.116961 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.116984 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49ef4625-1d3a-4a9f-b595-c2433d32326d" volumeName="kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.117001 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.117017 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.117035 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.117051 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.117076 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.117095 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.117122 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.117142 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.117159 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="3b6479f0-333b-4a96-9adf-2099afdc2447" volumeName="kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.117181 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.117200 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.117222 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.117240 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.117258 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.117276 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.117295 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.117312 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.117332 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.117349 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.117369 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.117388 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.117405 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.117425 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.117445 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.117463 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.117482 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.117502 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.117519 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.117535 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.117553 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.117570 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.117588 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.117606 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.117625 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.117645 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.117668 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.117687 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.117706 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.117726 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.117746 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.117763 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.117781 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" 
volumeName="kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.117800 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.117816 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.117833 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.117850 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.117867 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.117886 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.117902 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.117920 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.117963 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.117981 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.118000 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" 
volumeName="kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.118017 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.118033 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.118051 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.118068 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.118085 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.118102 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.118119 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.118135 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.118155 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="44663579-783b-4372-86d6-acf235a62d72" volumeName="kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.118172 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.120849 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" 
volumeName="kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.120889 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.120922 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.120988 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.121017 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.121043 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.121069 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.121096 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.131250 4923 reconstruct.go:144] "Volume is marked device as uncertain and added into the actual state" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.132171 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.132256 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.132534 4923 reconstruct.go:130] 
"Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.132580 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" volumeName="kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.132604 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.132632 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.132656 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.132676 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.132922 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.132984 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.133012 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.133034 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.133058 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.133290 4923 reconstruct.go:130] "Volume is marked as 
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.133329 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" seLinuxMountContext=""
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.133354 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" seLinuxMountContext=""
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.133396 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" seLinuxMountContext=""
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.133430 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" seLinuxMountContext=""
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.133599 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" seLinuxMountContext=""
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.134043 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" seLinuxMountContext=""
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.134096 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" seLinuxMountContext=""
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.134134 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" seLinuxMountContext=""
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.134207 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" seLinuxMountContext=""
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.134281 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" seLinuxMountContext=""
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.134346 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" seLinuxMountContext=""
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.134372 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" seLinuxMountContext=""
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.134438 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d751cbb-f2e2-430d-9754-c882a5e924a5" volumeName="kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl" seLinuxMountContext=""
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.134462 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" seLinuxMountContext=""
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.134512 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" seLinuxMountContext=""
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.134532 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" seLinuxMountContext=""
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.134550 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" seLinuxMountContext=""
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.134594 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" seLinuxMountContext=""
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.134608 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" seLinuxMountContext=""
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.134626 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" seLinuxMountContext=""
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.134668 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" seLinuxMountContext=""
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.134690 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" seLinuxMountContext=""
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.134704 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" seLinuxMountContext=""
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.134719 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" seLinuxMountContext=""
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.134798 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" seLinuxMountContext=""
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.134850 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" seLinuxMountContext=""
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.134875 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" seLinuxMountContext=""
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.134916 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext=""
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.135120 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" seLinuxMountContext=""
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.135181 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" seLinuxMountContext=""
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.135228 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" seLinuxMountContext=""
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.135261 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" seLinuxMountContext=""
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.135371 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" seLinuxMountContext=""
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.135412 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" seLinuxMountContext=""
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.135436 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" seLinuxMountContext=""
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.135463 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" seLinuxMountContext=""
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.135501 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" seLinuxMountContext=""
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.135538 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" seLinuxMountContext=""
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.135577 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" seLinuxMountContext=""
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.135618 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" seLinuxMountContext=""
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.135640 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" seLinuxMountContext=""
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.135666 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" seLinuxMountContext=""
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.135686 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" seLinuxMountContext=""
Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.135713 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"
volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.135733 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.135753 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.135791 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.135816 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.135856 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.135887 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.135920 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.135982 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.136016 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.136044 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.136065 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.136088 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.136147 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.136167 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.136189 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.136217 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.136238 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.136267 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.136288 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.136310 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.136335 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.136355 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" 
volumeName="kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.136383 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.136405 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.136426 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.136489 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.136509 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.136535 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.136555 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.136576 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.136601 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.136623 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.136648 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" 
seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.136669 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.136690 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.136715 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.136738 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.136766 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.136787 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.136808 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.136837 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.136861 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.136889 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.136909 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" seLinuxMountContext="" Nov 28 
11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.136961 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.136990 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.137011 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.137031 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.137057 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.137079 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.137104 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.137126 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.137151 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.137190 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.137221 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" seLinuxMountContext="" Nov 28 11:08:41 crc 
kubenswrapper[4923]: I1128 11:08:41.137260 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.137285 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.137307 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.137333 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.137353 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.137378 4923 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" seLinuxMountContext="" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.137397 4923 reconstruct.go:97] "Volume reconstruction finished" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.137411 4923 reconciler.go:26] "Reconciler: start to sync state" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.143112 4923 manager.go:324] Recovery completed Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.155385 4923 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.157501 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.157537 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.157549 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.158666 4923 cpu_manager.go:225] "Starting CPU manager" policy="none" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.158693 4923 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.158727 4923 state_mem.go:36] "Initialized new in-memory state store" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.165073 4923 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv4" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.167310 4923 kubelet_network_linux.go:50] "Initialized iptables rules." 
protocol="IPv6" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.167355 4923 status_manager.go:217] "Starting to sync pod status with apiserver" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.167384 4923 kubelet.go:2335] "Starting kubelet main sync loop" Nov 28 11:08:41 crc kubenswrapper[4923]: E1128 11:08:41.167433 4923 kubelet.go:2359] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:41.168053 4923 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.196:6443: connect: connection refused Nov 28 11:08:41 crc kubenswrapper[4923]: E1128 11:08:41.168126 4923 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.196:6443: connect: connection refused" logger="UnhandledError" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.170301 4923 policy_none.go:49] "None policy: Start" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.174611 4923 memory_manager.go:170] "Starting memorymanager" policy="None" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.174702 4923 state_mem.go:35] "Initializing new in-memory state store" Nov 28 11:08:41 crc kubenswrapper[4923]: E1128 11:08:41.198651 4923 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.237360 4923 manager.go:334] "Starting Device Plugin manager" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.237420 4923 manager.go:513] "Failed to read data from checkpoint" checkpoint="kubelet_internal_checkpoint" err="checkpoint is not found" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.237434 4923 server.go:79] "Starting device plugin registration server" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.237802 4923 eviction_manager.go:189] "Eviction manager: starting control loop" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.237825 4923 container_log_manager.go:189] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.238058 4923 plugin_watcher.go:51] "Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.238142 4923 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.238151 4923 plugin_manager.go:118] "Starting Kubelet Plugin Manager" Nov 28 11:08:41 crc kubenswrapper[4923]: E1128 11:08:41.250813 4923 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.268244 4923 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-controller-manager/kube-controller-manager-crc","openshift-kube-scheduler/openshift-kube-scheduler-crc","openshift-machine-config-operator/kube-rbac-proxy-crio-crc","openshift-etcd/etcd-crc","openshift-kube-apiserver/kube-apiserver-crc"] Nov 28 11:08:41 crc kubenswrapper[4923]: 
I1128 11:08:41.268366 4923 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.269715 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.269762 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.269783 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.269984 4923 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.270210 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.270268 4923 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.272524 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.272563 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.272579 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.272687 4923 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.272866 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.272929 4923 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.273594 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.273636 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.273606 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.273671 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.273686 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.273653 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.273858 4923 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.274077 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.274126 4923 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.274162 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.274192 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.274208 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.274978 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.275017 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.275032 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.275187 4923 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.275844 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.275880 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.275897 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.275458 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.276113 4923 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.276167 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.276188 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.276199 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.276373 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.276407 4923 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.277106 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.277150 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.277165 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.277729 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.277755 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.277766 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:08:41 crc kubenswrapper[4923]: E1128 11:08:41.301460 4923 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.196:6443: connect: connection refused" interval="400ms" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.338301 4923 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.338876 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.338959 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.338996 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.339099 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.339156 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") 
" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.339190 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.339222 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.339254 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.339321 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.339398 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.339428 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.339457 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.339488 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.339515 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.339539 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.339552 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.339563 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.339592 4923 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.339580 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 11:08:41 crc kubenswrapper[4923]: E1128 11:08:41.340303 4923 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.196:6443: connect: connection refused" node="crc" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.440723 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.440811 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.440854 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.440887 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.440921 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.440991 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 
28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.441020 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.441008 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.441096 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.441107 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.441305 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.441116 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.441393 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.441013 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.441431 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.441162 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod 
\"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.441369 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.441533 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.441561 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.441067 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.441563 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.441669 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.441585 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.441116 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.441138 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.441731 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: 
\"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.441762 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.441767 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.441794 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.441824 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.541080 4923 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.542571 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.542614 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.542631 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.542665 4923 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 28 11:08:41 crc kubenswrapper[4923]: E1128 11:08:41.543193 4923 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.196:6443: connect: connection refused" node="crc" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.616438 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:41.637815 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf614b9022728cf315e60c057852e563e.slice/crio-15c87eede1540984e762b37baa3755648500dd219eb4d0947e952bce6d298663 WatchSource:0}: Error finding container 15c87eede1540984e762b37baa3755648500dd219eb4d0947e952bce6d298663: Status 404 returned error can't find the container with id 15c87eede1540984e762b37baa3755648500dd219eb4d0947e952bce6d298663 Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.645381 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:41.669486 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3dcd261975c3d6b9a6ad6367fd4facd3.slice/crio-0088803fb9f40c0e7a20fd287698eff767d5cab05f1fd9383d1179b2fbc0885e WatchSource:0}: Error finding container 0088803fb9f40c0e7a20fd287698eff767d5cab05f1fd9383d1179b2fbc0885e: Status 404 returned error can't find the container with id 0088803fb9f40c0e7a20fd287698eff767d5cab05f1fd9383d1179b2fbc0885e Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.676507 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.697010 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:41.700677 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1b160f5dda77d281dd8e69ec8d817f9.slice/crio-f11fb4ce3e72ab85ced5506110546ce1c5286c5218ff0e6414bc70ff029cfd54 WatchSource:0}: Error finding container f11fb4ce3e72ab85ced5506110546ce1c5286c5218ff0e6414bc70ff029cfd54: Status 404 returned error can't find the container with id f11fb4ce3e72ab85ced5506110546ce1c5286c5218ff0e6414bc70ff029cfd54 Nov 28 11:08:41 crc kubenswrapper[4923]: E1128 11:08:41.702481 4923 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.196:6443: connect: connection refused" interval="800ms" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.706911 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:41.715891 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2139d3e2895fc6797b9c76a1b4c9886d.slice/crio-147649f3ead8f868033c1fc8ed3e83e2ebde3a95b7f0fead92e8e7c4194116fe WatchSource:0}: Error finding container 147649f3ead8f868033c1fc8ed3e83e2ebde3a95b7f0fead92e8e7c4194116fe: Status 404 returned error can't find the container with id 147649f3ead8f868033c1fc8ed3e83e2ebde3a95b7f0fead92e8e7c4194116fe Nov 28 11:08:41 crc kubenswrapper[4923]: W1128 11:08:41.725898 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4b27818a5e8e43d0dc095d08835c792.slice/crio-8409d510f0f8086cc9358b53cc2a3998d88580b500312cb1cf6ae390ba9fa971 WatchSource:0}: Error finding container 8409d510f0f8086cc9358b53cc2a3998d88580b500312cb1cf6ae390ba9fa971: Status 404 returned error can't find the container with id 8409d510f0f8086cc9358b53cc2a3998d88580b500312cb1cf6ae390ba9fa971 Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.943810 4923 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.945638 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.945680 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.945690 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:08:41 crc kubenswrapper[4923]: I1128 11:08:41.945717 4923 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 28 11:08:41 crc kubenswrapper[4923]: E1128 11:08:41.946119 4923 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.196:6443: connect: connection refused" node="crc" Nov 28 11:08:42 crc kubenswrapper[4923]: I1128 11:08:42.094843 4923 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.196:6443: connect: connection refused Nov 28 11:08:42 crc kubenswrapper[4923]: I1128 11:08:42.098842 4923 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-12 07:39:28.635915377 +0000 UTC Nov 28 11:08:42 crc kubenswrapper[4923]: I1128 11:08:42.098893 4923 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 332h30m46.537024779s for next certificate rotation Nov 28 11:08:42 crc kubenswrapper[4923]: W1128 11:08:42.115574 4923 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.196:6443: connect: connection refused Nov 28 11:08:42 crc kubenswrapper[4923]: E1128 11:08:42.115663 4923 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get 
\"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.196:6443: connect: connection refused" logger="UnhandledError" Nov 28 11:08:42 crc kubenswrapper[4923]: I1128 11:08:42.174867 4923 generic.go:334] "Generic (PLEG): container finished" podID="d1b160f5dda77d281dd8e69ec8d817f9" containerID="b40689154cc1372b3b8f28dc5dfbd82c6636401079cb0d287978e8bf6480b871" exitCode=0 Nov 28 11:08:42 crc kubenswrapper[4923]: I1128 11:08:42.174927 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerDied","Data":"b40689154cc1372b3b8f28dc5dfbd82c6636401079cb0d287978e8bf6480b871"} Nov 28 11:08:42 crc kubenswrapper[4923]: I1128 11:08:42.175122 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"f11fb4ce3e72ab85ced5506110546ce1c5286c5218ff0e6414bc70ff029cfd54"} Nov 28 11:08:42 crc kubenswrapper[4923]: I1128 11:08:42.176596 4923 generic.go:334] "Generic (PLEG): container finished" podID="3dcd261975c3d6b9a6ad6367fd4facd3" containerID="81a3db2980eccec7427b48074b3314c31b8471001076f7a7d9cfae435564097e" exitCode=0 Nov 28 11:08:42 crc kubenswrapper[4923]: I1128 11:08:42.176657 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerDied","Data":"81a3db2980eccec7427b48074b3314c31b8471001076f7a7d9cfae435564097e"} Nov 28 11:08:42 crc kubenswrapper[4923]: I1128 11:08:42.176687 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"0088803fb9f40c0e7a20fd287698eff767d5cab05f1fd9383d1179b2fbc0885e"} Nov 28 11:08:42 crc kubenswrapper[4923]: I1128 11:08:42.176772 4923 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 11:08:42 crc kubenswrapper[4923]: I1128 11:08:42.180584 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:08:42 crc kubenswrapper[4923]: I1128 11:08:42.180627 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:08:42 crc kubenswrapper[4923]: I1128 11:08:42.180641 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:08:42 crc kubenswrapper[4923]: I1128 11:08:42.181839 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"8f83e92b35264fccdd516d857e5a574a7156f7615b643691b6f8694daa38089b"} Nov 28 11:08:42 crc kubenswrapper[4923]: I1128 11:08:42.181889 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"15c87eede1540984e762b37baa3755648500dd219eb4d0947e952bce6d298663"} Nov 28 11:08:42 crc kubenswrapper[4923]: I1128 11:08:42.184689 4923 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" 
containerID="a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005" exitCode=0 Nov 28 11:08:42 crc kubenswrapper[4923]: I1128 11:08:42.184755 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005"} Nov 28 11:08:42 crc kubenswrapper[4923]: I1128 11:08:42.184775 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"8409d510f0f8086cc9358b53cc2a3998d88580b500312cb1cf6ae390ba9fa971"} Nov 28 11:08:42 crc kubenswrapper[4923]: I1128 11:08:42.184876 4923 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 11:08:42 crc kubenswrapper[4923]: I1128 11:08:42.185833 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:08:42 crc kubenswrapper[4923]: I1128 11:08:42.185892 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:08:42 crc kubenswrapper[4923]: I1128 11:08:42.185909 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:08:42 crc kubenswrapper[4923]: I1128 11:08:42.188028 4923 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="f5f70899dd8b368d41c71079fac9b95180bbc79531c4c85229cb6d799f382781" exitCode=0 Nov 28 11:08:42 crc kubenswrapper[4923]: I1128 11:08:42.188060 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"f5f70899dd8b368d41c71079fac9b95180bbc79531c4c85229cb6d799f382781"} Nov 28 11:08:42 crc kubenswrapper[4923]: I1128 11:08:42.188087 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"147649f3ead8f868033c1fc8ed3e83e2ebde3a95b7f0fead92e8e7c4194116fe"} Nov 28 11:08:42 crc kubenswrapper[4923]: I1128 11:08:42.188193 4923 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 11:08:42 crc kubenswrapper[4923]: I1128 11:08:42.188898 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:08:42 crc kubenswrapper[4923]: I1128 11:08:42.188953 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:08:42 crc kubenswrapper[4923]: I1128 11:08:42.188965 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:08:42 crc kubenswrapper[4923]: I1128 11:08:42.189978 4923 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 11:08:42 crc kubenswrapper[4923]: I1128 11:08:42.191046 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:08:42 crc kubenswrapper[4923]: I1128 11:08:42.191074 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:08:42 crc kubenswrapper[4923]: I1128 11:08:42.191134 4923 kubelet_node_status.go:724] "Recording event message 
for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:08:42 crc kubenswrapper[4923]: W1128 11:08:42.325847 4923 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.196:6443: connect: connection refused Nov 28 11:08:42 crc kubenswrapper[4923]: E1128 11:08:42.325922 4923 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.196:6443: connect: connection refused" logger="UnhandledError" Nov 28 11:08:42 crc kubenswrapper[4923]: W1128 11:08:42.413017 4923 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.196:6443: connect: connection refused Nov 28 11:08:42 crc kubenswrapper[4923]: E1128 11:08:42.413163 4923 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.196:6443: connect: connection refused" logger="UnhandledError" Nov 28 11:08:42 crc kubenswrapper[4923]: W1128 11:08:42.417840 4923 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.196:6443: connect: connection refused Nov 28 11:08:42 crc kubenswrapper[4923]: E1128 11:08:42.417901 4923 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.196:6443: connect: connection refused" logger="UnhandledError" Nov 28 11:08:42 crc kubenswrapper[4923]: E1128 11:08:42.501912 4923 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.196:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.187c27140cf039c8 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-28 11:08:41.092291016 +0000 UTC m=+0.220975266,LastTimestamp:2025-11-28 11:08:41.092291016 +0000 UTC m=+0.220975266,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 28 11:08:42 crc kubenswrapper[4923]: E1128 11:08:42.503368 4923 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.196:6443: connect: connection refused" interval="1.6s" Nov 28 11:08:42 crc kubenswrapper[4923]: I1128 11:08:42.746356 4923 kubelet_node_status.go:401] "Setting node 
annotation to enable volume controller attach/detach" Nov 28 11:08:42 crc kubenswrapper[4923]: I1128 11:08:42.747916 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:08:42 crc kubenswrapper[4923]: I1128 11:08:42.747964 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:08:42 crc kubenswrapper[4923]: I1128 11:08:42.747976 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:08:42 crc kubenswrapper[4923]: I1128 11:08:42.747999 4923 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 28 11:08:43 crc kubenswrapper[4923]: I1128 11:08:43.193093 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"81443f6c4751860dce1d5ecf0f867a1c9641a989cbfd171e71de418f738108c7"} Nov 28 11:08:43 crc kubenswrapper[4923]: I1128 11:08:43.193154 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"3513d0400c621295e074b54a00fe7f284c38bebd8e7f11315db91fef9a2a4693"} Nov 28 11:08:43 crc kubenswrapper[4923]: I1128 11:08:43.193175 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"403d762c4ba4c4f3309ef1b447be25f7882da8a2d03b9376711063165438294f"} Nov 28 11:08:43 crc kubenswrapper[4923]: I1128 11:08:43.193299 4923 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 11:08:43 crc kubenswrapper[4923]: I1128 11:08:43.194510 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:08:43 crc kubenswrapper[4923]: I1128 11:08:43.194552 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:08:43 crc kubenswrapper[4923]: I1128 11:08:43.194568 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:08:43 crc kubenswrapper[4923]: I1128 11:08:43.197807 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"cc960423fd7ee0a6231020982f5b932a6a2d7d0515d6f6df503d6c5d51b82096"} Nov 28 11:08:43 crc kubenswrapper[4923]: I1128 11:08:43.197855 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"8841f44f1d4af0e73960ce1c7ac5a4da352f85f6b3637315faa716d853be3277"} Nov 28 11:08:43 crc kubenswrapper[4923]: I1128 11:08:43.197875 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"f7b3757e1d1a5295909db644a475e35e9f9826cd7382a5a3eba86b4a76ac04d7"} Nov 28 11:08:43 crc kubenswrapper[4923]: I1128 11:08:43.197830 4923 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 11:08:43 crc 
kubenswrapper[4923]: I1128 11:08:43.199011 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:08:43 crc kubenswrapper[4923]: I1128 11:08:43.199037 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:08:43 crc kubenswrapper[4923]: I1128 11:08:43.199045 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:08:43 crc kubenswrapper[4923]: I1128 11:08:43.202677 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"fc06f87c8ea0744810e2b9cb7ff8bb529fc1b2133ab79d12eb8e6129accd3e18"} Nov 28 11:08:43 crc kubenswrapper[4923]: I1128 11:08:43.202720 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"c6f085f1fd5a1ed6abe0727d6a94c95fb1b97a9f00a0dc157f62f68698c25ba9"} Nov 28 11:08:43 crc kubenswrapper[4923]: I1128 11:08:43.202741 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"28093276aebb4751d979649c4ced86f500308d0d4dde397771c0e1e968250ec8"} Nov 28 11:08:43 crc kubenswrapper[4923]: I1128 11:08:43.202757 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"28ae91e6197ea506c337abdbce14a048856e6bda9b35c5de922904c26bc96a54"} Nov 28 11:08:43 crc kubenswrapper[4923]: I1128 11:08:43.202774 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"eece6b2154126c64202c6cb5a8b2953275ed2dc75e76fef6aaf2c4b82a1979f3"} Nov 28 11:08:43 crc kubenswrapper[4923]: I1128 11:08:43.202892 4923 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 11:08:43 crc kubenswrapper[4923]: I1128 11:08:43.203964 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:08:43 crc kubenswrapper[4923]: I1128 11:08:43.204011 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:08:43 crc kubenswrapper[4923]: I1128 11:08:43.204031 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:08:43 crc kubenswrapper[4923]: I1128 11:08:43.204818 4923 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="cc14b24aaf2a5d68f1b6fb8fff339ca4dac0206479463ea24cfdcdbb70d4b341" exitCode=0 Nov 28 11:08:43 crc kubenswrapper[4923]: I1128 11:08:43.204875 4923 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 11:08:43 crc kubenswrapper[4923]: I1128 11:08:43.204894 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"cc14b24aaf2a5d68f1b6fb8fff339ca4dac0206479463ea24cfdcdbb70d4b341"} Nov 28 11:08:43 crc kubenswrapper[4923]: I1128 11:08:43.205078 4923 
kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 11:08:43 crc kubenswrapper[4923]: I1128 11:08:43.205550 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:08:43 crc kubenswrapper[4923]: I1128 11:08:43.205569 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:08:43 crc kubenswrapper[4923]: I1128 11:08:43.205576 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:08:43 crc kubenswrapper[4923]: I1128 11:08:43.206059 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:08:43 crc kubenswrapper[4923]: I1128 11:08:43.206114 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:08:43 crc kubenswrapper[4923]: I1128 11:08:43.206138 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:08:44 crc kubenswrapper[4923]: I1128 11:08:44.210900 4923 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="6c5124d00a4e11dafa0cc5cd5c64147a9db6a83430fc9f0ed30cae3db40d46dc" exitCode=0 Nov 28 11:08:44 crc kubenswrapper[4923]: I1128 11:08:44.211054 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"6c5124d00a4e11dafa0cc5cd5c64147a9db6a83430fc9f0ed30cae3db40d46dc"} Nov 28 11:08:44 crc kubenswrapper[4923]: I1128 11:08:44.211224 4923 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 11:08:44 crc kubenswrapper[4923]: I1128 11:08:44.212511 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:08:44 crc kubenswrapper[4923]: I1128 11:08:44.212559 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:08:44 crc kubenswrapper[4923]: I1128 11:08:44.212576 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:08:44 crc kubenswrapper[4923]: I1128 11:08:44.216459 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"9d943757b7fd4ba70234c55984a5db6cb54ab9d5bded184d981fdb3d770925ea"} Nov 28 11:08:44 crc kubenswrapper[4923]: I1128 11:08:44.216558 4923 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 11:08:44 crc kubenswrapper[4923]: I1128 11:08:44.216584 4923 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 11:08:44 crc kubenswrapper[4923]: I1128 11:08:44.219779 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:08:44 crc kubenswrapper[4923]: I1128 11:08:44.219850 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:08:44 crc kubenswrapper[4923]: I1128 11:08:44.219876 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:08:44 crc 
kubenswrapper[4923]: I1128 11:08:44.219795 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:08:44 crc kubenswrapper[4923]: I1128 11:08:44.221643 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:08:44 crc kubenswrapper[4923]: I1128 11:08:44.221675 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:08:45 crc kubenswrapper[4923]: I1128 11:08:45.224799 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"93ada60cb205defae9fe71696319d0aaa2d8d48980aab7ab0fb89ff615d8c7a0"} Nov 28 11:08:45 crc kubenswrapper[4923]: I1128 11:08:45.224986 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"9bff5c9e7a4713a14d9d0df53d9c94fbaa6312abc2818caf4f0883b64a132549"} Nov 28 11:08:45 crc kubenswrapper[4923]: I1128 11:08:45.225015 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"6a9ab7591a503e0c2fafec3b01b0fbac57cc7cd50e3818a2dd2db62de6a96edc"} Nov 28 11:08:45 crc kubenswrapper[4923]: I1128 11:08:45.515429 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 11:08:45 crc kubenswrapper[4923]: I1128 11:08:45.515685 4923 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 11:08:45 crc kubenswrapper[4923]: I1128 11:08:45.515844 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 11:08:45 crc kubenswrapper[4923]: I1128 11:08:45.517043 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:08:45 crc kubenswrapper[4923]: I1128 11:08:45.517093 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:08:45 crc kubenswrapper[4923]: I1128 11:08:45.517110 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:08:46 crc kubenswrapper[4923]: I1128 11:08:46.232538 4923 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 11:08:46 crc kubenswrapper[4923]: I1128 11:08:46.232539 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"4cab9c87fe902a35a97ee9236bb68b014682211270aa2e94bd3240d8c3645ebc"} Nov 28 11:08:46 crc kubenswrapper[4923]: I1128 11:08:46.232595 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"8e5978e4a8d212fc6b2f5c865fc7ff9e03de5c35cb68646b36d878e058ab2530"} Nov 28 11:08:46 crc kubenswrapper[4923]: I1128 11:08:46.232699 4923 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 11:08:46 crc kubenswrapper[4923]: I1128 11:08:46.233323 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:08:46 crc 
kubenswrapper[4923]: I1128 11:08:46.233368 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:08:46 crc kubenswrapper[4923]: I1128 11:08:46.233384 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:08:46 crc kubenswrapper[4923]: I1128 11:08:46.233707 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:08:46 crc kubenswrapper[4923]: I1128 11:08:46.233731 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:08:46 crc kubenswrapper[4923]: I1128 11:08:46.233741 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:08:47 crc kubenswrapper[4923]: I1128 11:08:47.235646 4923 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 11:08:47 crc kubenswrapper[4923]: I1128 11:08:47.236999 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:08:47 crc kubenswrapper[4923]: I1128 11:08:47.237037 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:08:47 crc kubenswrapper[4923]: I1128 11:08:47.237049 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:08:47 crc kubenswrapper[4923]: I1128 11:08:47.251203 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 11:08:47 crc kubenswrapper[4923]: I1128 11:08:47.251419 4923 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 11:08:47 crc kubenswrapper[4923]: I1128 11:08:47.252708 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:08:47 crc kubenswrapper[4923]: I1128 11:08:47.252755 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:08:47 crc kubenswrapper[4923]: I1128 11:08:47.252771 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:08:47 crc kubenswrapper[4923]: I1128 11:08:47.338402 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 28 11:08:47 crc kubenswrapper[4923]: I1128 11:08:47.338563 4923 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 11:08:47 crc kubenswrapper[4923]: I1128 11:08:47.340058 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:08:47 crc kubenswrapper[4923]: I1128 11:08:47.340106 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:08:47 crc kubenswrapper[4923]: I1128 11:08:47.340124 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:08:47 crc kubenswrapper[4923]: I1128 11:08:47.742299 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-etcd/etcd-crc" Nov 28 11:08:48 crc kubenswrapper[4923]: I1128 11:08:48.239264 4923 kubelet_node_status.go:401] "Setting node annotation 
to enable volume controller attach/detach" Nov 28 11:08:48 crc kubenswrapper[4923]: I1128 11:08:48.243691 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:08:48 crc kubenswrapper[4923]: I1128 11:08:48.244360 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:08:48 crc kubenswrapper[4923]: I1128 11:08:48.244425 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:08:49 crc kubenswrapper[4923]: I1128 11:08:49.257090 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 11:08:49 crc kubenswrapper[4923]: I1128 11:08:49.257308 4923 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 11:08:49 crc kubenswrapper[4923]: I1128 11:08:49.258511 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:08:49 crc kubenswrapper[4923]: I1128 11:08:49.258559 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:08:49 crc kubenswrapper[4923]: I1128 11:08:49.258572 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:08:49 crc kubenswrapper[4923]: I1128 11:08:49.477783 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 11:08:49 crc kubenswrapper[4923]: I1128 11:08:49.484835 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 11:08:50 crc kubenswrapper[4923]: I1128 11:08:50.164580 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 11:08:50 crc kubenswrapper[4923]: I1128 11:08:50.243492 4923 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 11:08:50 crc kubenswrapper[4923]: I1128 11:08:50.243616 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 11:08:50 crc kubenswrapper[4923]: I1128 11:08:50.244520 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:08:50 crc kubenswrapper[4923]: I1128 11:08:50.244576 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:08:50 crc kubenswrapper[4923]: I1128 11:08:50.244599 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:08:50 crc kubenswrapper[4923]: I1128 11:08:50.422680 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-etcd/etcd-crc" Nov 28 11:08:50 crc kubenswrapper[4923]: I1128 11:08:50.422812 4923 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 11:08:50 crc kubenswrapper[4923]: I1128 11:08:50.423789 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:08:50 crc kubenswrapper[4923]: I1128 11:08:50.423815 4923 kubelet_node_status.go:724] "Recording event 
message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:08:50 crc kubenswrapper[4923]: I1128 11:08:50.423823 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:08:51 crc kubenswrapper[4923]: I1128 11:08:51.245767 4923 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 11:08:51 crc kubenswrapper[4923]: I1128 11:08:51.248550 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:08:51 crc kubenswrapper[4923]: I1128 11:08:51.248595 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:08:51 crc kubenswrapper[4923]: I1128 11:08:51.248619 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:08:51 crc kubenswrapper[4923]: E1128 11:08:51.251139 4923 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Nov 28 11:08:52 crc kubenswrapper[4923]: E1128 11:08:52.749455 4923 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": net/http: TLS handshake timeout" node="crc" Nov 28 11:08:53 crc kubenswrapper[4923]: I1128 11:08:53.091308 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 11:08:53 crc kubenswrapper[4923]: I1128 11:08:53.091493 4923 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 11:08:53 crc kubenswrapper[4923]: I1128 11:08:53.093015 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:08:53 crc kubenswrapper[4923]: I1128 11:08:53.093072 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:08:53 crc kubenswrapper[4923]: I1128 11:08:53.093091 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:08:53 crc kubenswrapper[4923]: I1128 11:08:53.095889 4923 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": net/http: TLS handshake timeout Nov 28 11:08:53 crc kubenswrapper[4923]: I1128 11:08:53.165633 4923 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 28 11:08:53 crc kubenswrapper[4923]: I1128 11:08:53.165733 4923 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 28 11:08:54 crc kubenswrapper[4923]: I1128 11:08:54.050466 4923 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe 
failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Nov 28 11:08:54 crc kubenswrapper[4923]: I1128 11:08:54.050522 4923 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Nov 28 11:08:54 crc kubenswrapper[4923]: I1128 11:08:54.078617 4923 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Nov 28 11:08:54 crc kubenswrapper[4923]: I1128 11:08:54.078664 4923 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Nov 28 11:08:54 crc kubenswrapper[4923]: I1128 11:08:54.350382 4923 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 11:08:54 crc kubenswrapper[4923]: I1128 11:08:54.351480 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:08:54 crc kubenswrapper[4923]: I1128 11:08:54.351509 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:08:54 crc kubenswrapper[4923]: I1128 11:08:54.351517 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:08:54 crc kubenswrapper[4923]: I1128 11:08:54.351537 4923 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 28 11:08:57 crc kubenswrapper[4923]: I1128 11:08:57.260029 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 11:08:57 crc kubenswrapper[4923]: I1128 11:08:57.260285 4923 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 11:08:57 crc kubenswrapper[4923]: I1128 11:08:57.261631 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:08:57 crc kubenswrapper[4923]: I1128 11:08:57.261696 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:08:57 crc kubenswrapper[4923]: I1128 11:08:57.261714 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:08:57 crc kubenswrapper[4923]: I1128 11:08:57.266665 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 11:08:57 crc kubenswrapper[4923]: I1128 11:08:57.780811 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc" Nov 28 11:08:57 crc kubenswrapper[4923]: I1128 11:08:57.781175 4923 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 11:08:57 crc 
kubenswrapper[4923]: I1128 11:08:57.783278 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:08:57 crc kubenswrapper[4923]: I1128 11:08:57.783335 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:08:57 crc kubenswrapper[4923]: I1128 11:08:57.783353 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:08:57 crc kubenswrapper[4923]: I1128 11:08:57.799887 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc" Nov 28 11:08:58 crc kubenswrapper[4923]: I1128 11:08:58.264502 4923 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 11:08:58 crc kubenswrapper[4923]: I1128 11:08:58.264757 4923 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 11:08:58 crc kubenswrapper[4923]: I1128 11:08:58.265838 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:08:58 crc kubenswrapper[4923]: I1128 11:08:58.265883 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:08:58 crc kubenswrapper[4923]: I1128 11:08:58.265901 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:08:58 crc kubenswrapper[4923]: I1128 11:08:58.266273 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:08:58 crc kubenswrapper[4923]: I1128 11:08:58.266333 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:08:58 crc kubenswrapper[4923]: I1128 11:08:58.266350 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:08:59 crc kubenswrapper[4923]: E1128 11:08:59.051134 4923 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": context deadline exceeded" interval="3.2s" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.054002 4923 trace.go:236] Trace[792950393]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (28-Nov-2025 11:08:44.923) (total time: 14130ms): Nov 28 11:08:59 crc kubenswrapper[4923]: Trace[792950393]: ---"Objects listed" error: 14130ms (11:08:59.053) Nov 28 11:08:59 crc kubenswrapper[4923]: Trace[792950393]: [14.130711245s] [14.130711245s] END Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.054049 4923 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.055215 4923 trace.go:236] Trace[1364348307]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (28-Nov-2025 11:08:44.628) (total time: 14426ms): Nov 28 11:08:59 crc kubenswrapper[4923]: Trace[1364348307]: ---"Objects listed" error: 14426ms (11:08:59.055) Nov 28 11:08:59 crc kubenswrapper[4923]: Trace[1364348307]: [14.426552491s] [14.426552491s] END Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.055573 4923 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Nov 28 11:08:59 crc kubenswrapper[4923]: 
I1128 11:08:59.055330 4923 trace.go:236] Trace[1386841727]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (28-Nov-2025 11:08:44.392) (total time: 14662ms): Nov 28 11:08:59 crc kubenswrapper[4923]: Trace[1386841727]: ---"Objects listed" error: 14662ms (11:08:59.055) Nov 28 11:08:59 crc kubenswrapper[4923]: Trace[1386841727]: [14.662463991s] [14.662463991s] END Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.055846 4923 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.059166 4923 reconstruct.go:205] "DevicePaths of reconstructed volumes updated" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.060026 4923 trace.go:236] Trace[1880433425]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (28-Nov-2025 11:08:44.116) (total time: 14943ms): Nov 28 11:08:59 crc kubenswrapper[4923]: Trace[1880433425]: ---"Objects listed" error: 14943ms (11:08:59.059) Nov 28 11:08:59 crc kubenswrapper[4923]: Trace[1880433425]: [14.943259173s] [14.943259173s] END Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.060060 4923 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.097349 4923 apiserver.go:52] "Watching apiserver" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.112312 4923 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.112622 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-network-operator/iptables-alerter-4ln5h","openshift-network-operator/network-operator-58b4c7f79c-55gtf","openshift-network-console/networking-console-plugin-85b44fc459-gdk6g","openshift-network-diagnostics/network-check-source-55646444c4-trplf","openshift-network-diagnostics/network-check-target-xd92c","openshift-network-node-identity/network-node-identity-vrzqb"] Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.113063 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.113141 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.113078 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.113281 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.113465 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.113652 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 11:08:59 crc kubenswrapper[4923]: E1128 11:08:59.113776 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 11:08:59 crc kubenswrapper[4923]: E1128 11:08:59.113869 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 11:08:59 crc kubenswrapper[4923]: E1128 11:08:59.114437 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.119871 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.119893 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.120829 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.120912 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.121245 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.121604 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.122027 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.122464 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.122026 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.168513 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.186628 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook 
approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.199226 4923 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.201290 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.211092 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.218810 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.232632 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.245211 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.252774 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.260364 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.260406 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.260429 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.260450 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.260471 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.260495 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.260516 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.260536 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: 
\"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.260559 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.260578 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.260605 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.260627 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.260647 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.260667 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.260690 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.260688 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "bound-sa-token". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.260711 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.260688 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "kube-api-access-zkvpv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.260739 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.260766 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.260786 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.260811 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.260830 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.260853 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: "kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "kube-api-access-9xfj7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.260857 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.260904 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.260924 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.260960 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.260978 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.260995 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.261013 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.261029 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.261046 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.261064 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod 
\"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.261082 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.261099 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.261114 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.261129 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.261146 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.261162 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.261180 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.261196 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.261214 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.261228 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: 
\"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.261243 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.261260 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.261275 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.261290 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.261307 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.261323 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.261337 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.261354 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.261369 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.261384 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started 
for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.261402 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.261417 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.261433 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.261841 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.261879 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.261918 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.261958 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.261985 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.262011 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.262036 4923 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.262061 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.262087 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.262111 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.262128 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.262149 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.262170 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.262189 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.262211 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.262230 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.262255 4923 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.264514 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.264582 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.264618 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.265056 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.265128 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.265164 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.265258 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.265283 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.265313 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.265340 4923 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.265408 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.265439 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.265488 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.265512 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.265543 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.265573 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.265625 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.265674 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.265726 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: 
\"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.265775 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.265845 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.265913 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.266121 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.266162 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.266187 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.266218 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.266629 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: \"44663579-783b-4372-86d6-acf235a62d72\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.266748 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.266800 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.266833 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.266861 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.266891 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.267075 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.267124 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.267177 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.267208 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.260868 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "kube-api-access-pj782". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.261106 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "metrics-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.267238 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.267287 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.267437 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.267538 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.267610 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.267667 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.267790 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.267888 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.267970 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.268077 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" 
(UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.268116 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.268178 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.268216 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.268269 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.268335 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.268361 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.268399 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.268435 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.268615 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.268658 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: 
\"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.268688 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.268772 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.268820 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.268885 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.268913 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.269008 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.269045 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.269097 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.269148 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.269175 4923 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.269203 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.269253 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.269281 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.269362 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.269666 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.262159 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.269794 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.261437 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.262297 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.262469 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.262435 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.262704 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.262824 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.263247 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.263559 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.263668 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.263747 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.263817 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "kube-api-access-pcxfs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.263885 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "node-bootstrap-token". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.263994 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.264101 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.264110 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.264387 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.264397 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). 
InnerVolumeSpecName "kube-api-access-2d4wz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.264625 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.264619 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "kube-api-access-lz9wn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.264683 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.265040 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.265191 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.265343 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "kube-api-access-nzwt7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.265357 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.265501 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "kube-api-access-x7zkh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.265654 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.265731 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.266057 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.266396 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "kube-api-access-w9rds". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.266447 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.266615 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.266704 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). 
InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.270259 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "kube-api-access-fcqwp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.266885 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "kube-api-access-d6qdx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.266979 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "kube-api-access-s4n52". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.267173 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.267212 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.263263 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.267625 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "kube-api-access-d4lsv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.268562 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "kube-api-access-rnphk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.268626 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.268972 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.269007 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "kube-api-access-7c4vf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.269310 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.269330 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.269481 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.269668 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "kube-api-access-htfz6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.270030 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.270519 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.270590 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.270632 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.270666 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.270868 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "kube-api-access-fqsjt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.271018 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.271194 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.271594 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.271711 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.272342 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.272594 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.272756 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.273028 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.273090 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.273703 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). InnerVolumeSpecName "kube-api-access-jhbk2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.273753 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.274036 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.274965 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.275226 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.275372 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.275461 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.275743 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.275894 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.275995 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.276610 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.276840 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.276893 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.277045 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "image-import-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.272643 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.277527 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.277993 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "package-server-manager-serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.278143 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.283515 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.283642 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.283734 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.283801 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.283891 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.284001 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.284091 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.284193 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.284255 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: 
\"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.284338 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.284508 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.284590 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.284685 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.284749 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.284838 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.284909 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.285016 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.285080 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod \"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.285166 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for 
volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") "
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.285229 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.285317 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") "
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.285386 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") "
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.285477 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") "
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.285543 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") "
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.285834 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") "
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.285974 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") "
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.283613 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.283737 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.283850 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.284167 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "tmpfs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.287920 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.288172 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). InnerVolumeSpecName "kube-api-access-vt5rc". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.288481 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "kube-api-access-mg5zb". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.288537 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.288665 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "kube-api-access-bf2bz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.288674 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.289022 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.289138 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.289197 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.289999 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.290094 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-sysctl-allowlist". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.290472 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" (OuterVolumeSpecName: "kube-api-access-lzf88") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "kube-api-access-lzf88". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.290475 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.290719 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.290800 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "default-certificate". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.291165 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") "
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.291375 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "kube-api-access-tk88c". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.291409 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "mcd-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.291679 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.291753 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.291859 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "image-registry-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.291979 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") "
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.292001 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.292011 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") "
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.292126 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") "
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.292152 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") "
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.292176 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.292224 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") "
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.292274 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.292269 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.292324 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.292580 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.292755 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.292907 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.293232 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: E1128 11:08:59.293336 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 11:08:59.79331376 +0000 UTC m=+18.921998081 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.295759 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") "
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.295791 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") "
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.295811 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.295856 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") "
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.295879 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") "
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.295978 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") "
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.296027 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") "
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.296047 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") "
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.296067 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.296085 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") "
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.296106 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") "
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.296123 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.296141 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.296160 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") "
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.296178 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") "
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.296199 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.296226 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.296275 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.296299 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.296318 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb"
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.296338 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb"
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.296818 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h"
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.296850 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.297104 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.297123 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.297150 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.297176 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.297197 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb"
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.297220 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h"
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.297245 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb"
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.297264 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h"
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.297333 4923 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.297345 4923 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.297356 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.297367 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.297377 4923 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.297387 4923 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.297395 4923 reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.297405 4923 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.300163 4923 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.300176 4923 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.296297 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.296362 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "kube-api-access-249nr". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.296926 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.297288 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "kube-api-access-xcgwh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.297414 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.298183 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.298419 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.298733 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.298998 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "kube-api-access-zgdk5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.299267 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "machine-api-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.300204 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.300425 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.300451 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.300703 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.300767 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.301163 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" (OuterVolumeSpecName: "kube-api-access-kfwg7") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "kube-api-access-kfwg7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.301307 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.301313 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.301462 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.301649 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.301730 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.301907 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.301997 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.302157 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.302210 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.302219 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "machine-approver-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.302396 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.302483 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "mcc-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.302611 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). InnerVolumeSpecName "kube-api-access-pjr6v". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.302699 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.301759 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.302811 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.302819 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.303206 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.303423 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "kube-api-access-qg5z5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.303496 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "audit". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.303623 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.303629 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.303823 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "kube-api-access-sb6h7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.303915 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "kube-api-access-4d4hj". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.304045 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.304195 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.304211 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). InnerVolumeSpecName "kube-api-access-x2m85". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.304312 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.304418 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "multus-daemon-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.309966 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "webhook-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.304589 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "kube-api-access-x4zgh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.304764 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "kube-api-access-cfbct". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.295434 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.305702 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.305742 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.306234 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.306237 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "available-featuregates". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.306978 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.307177 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "kube-api-access-w7l8j". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.307479 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "stats-auth". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.307406 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.307907 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.308283 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.310256 4923 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.312166 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb"
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.312375 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h"
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.312430 4923 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.312449 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.312465 4923 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.312478 4923 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.312494 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.308391 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.312507 4923 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.312520 4923 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.312532 4923 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.312544 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.312557 4923 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.312569 4923 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.312581 4923 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.312596 4923 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.312609 4923 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.312634 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.312646 4923 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName:
\"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.312658 4923 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.312670 4923 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.312682 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.312695 4923 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.312708 4923 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.312723 4923 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.312736 4923 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.312750 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.312763 4923 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.312774 4923 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.312788 4923 reconciler_common.go:293] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.312800 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.312813 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: 
\"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.312825 4923 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.312838 4923 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.312850 4923 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.312862 4923 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.312873 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.315483 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.315509 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.315525 4923 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.315541 4923 reconciler_common.go:293] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.315553 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.315566 4923 reconciler_common.go:293] "Volume detached for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.315578 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.315591 4923 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: 
\"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.315603 4923 reconciler_common.go:293] "Volume detached for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.315618 4923 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.315632 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.315647 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.315660 4923 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.315672 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.315685 4923 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.315699 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.315710 4923 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.315722 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.315737 4923 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.315753 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.315766 4923 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" 
(UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.315778 4923 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.315790 4923 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.315801 4923 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.315813 4923 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.315825 4923 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.315836 4923 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.315850 4923 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.315866 4923 reconciler_common.go:293] "Volume detached for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.315879 4923 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.315892 4923 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.315904 4923 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.315918 4923 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.315974 4923 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") 
on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.315989 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.316003 4923 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.316017 4923 reconciler_common.go:293] "Volume detached for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.316033 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.316047 4923 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.316060 4923 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.316074 4923 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.316086 4923 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.316098 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.316110 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.316122 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.316134 4923 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.316145 4923 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") 
on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.316156 4923 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.316167 4923 reconciler_common.go:293] "Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.316180 4923 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.316192 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.316204 4923 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.316215 4923 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.316227 4923 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.316238 4923 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.316249 4923 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.316266 4923 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.316278 4923 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.316291 4923 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.316303 4923 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc 
kubenswrapper[4923]: I1128 11:08:59.316314 4923 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.316325 4923 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.316337 4923 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.316350 4923 reconciler_common.go:293] "Volume detached for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.316362 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.316374 4923 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.316386 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.316398 4923 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.316410 4923 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.315268 4923 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.308574 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.308634 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.310062 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.310084 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "serviceca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.310096 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "kube-api-access-6ccd8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.310325 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.310377 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.310590 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" (OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "kube-api-access-qs4fp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.310768 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "kube-api-access-xcphl". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.311001 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: E1128 11:08:59.311441 4923 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.311544 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: E1128 11:08:59.311558 4923 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.312317 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.312448 4923 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:58400->192.168.126.11:17697: read: connection reset by peer" start-of-body= Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.312477 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "samples-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.319500 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: E1128 11:08:59.319636 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. 
No retries permitted until 2025-11-28 11:08:59.819621834 +0000 UTC m=+18.948306044 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 11:08:59 crc kubenswrapper[4923]: E1128 11:08:59.319980 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 11:08:59.819961204 +0000 UTC m=+18.948645434 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.320026 4923 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:58400->192.168.126.11:17697: read: connection reset by peer" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.320359 4923 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body= Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.320384 4923 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.321052 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.332120 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.333295 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.334476 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.335794 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.336174 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: E1128 11:08:59.340615 4923 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 11:08:59 crc kubenswrapper[4923]: E1128 11:08:59.340645 4923 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 11:08:59 crc kubenswrapper[4923]: E1128 11:08:59.340658 4923 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 11:08:59 crc kubenswrapper[4923]: E1128 11:08:59.340716 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-28 11:08:59.84069848 +0000 UTC m=+18.969382690 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 11:08:59 crc kubenswrapper[4923]: E1128 11:08:59.346043 4923 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 11:08:59 crc kubenswrapper[4923]: E1128 11:08:59.346080 4923 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 11:08:59 crc kubenswrapper[4923]: E1128 11:08:59.346094 4923 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 11:08:59 crc kubenswrapper[4923]: E1128 11:08:59.346155 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-28 11:08:59.846130984 +0000 UTC m=+18.974815194 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.347499 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.362509 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.363849 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.366352 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.373007 4923 kubelet_node_status.go:115] "Node was previously registered" node="crc" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.373105 4923 kubelet_node_status.go:79] "Successfully registered node" node="crc" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.376629 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.376659 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.376667 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.376682 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.376693 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:08:59Z","lastTransitionTime":"2025-11-28T11:08:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:08:59 crc kubenswrapper[4923]: E1128 11:08:59.391712 4923 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f69ffe27-00d5-45aa-bb63-00075a21e0c7\\\",\\\"systemUUID\\\":\\\"bb6b4e53-d23a-4517-9d50-b05bdc3da8e4\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.394977 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.395010 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.395019 4923 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.395034 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.395043 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:08:59Z","lastTransitionTime":"2025-11-28T11:08:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 11:08:59 crc kubenswrapper[4923]: E1128 11:08:59.402797 4923 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status [status patch payload identical to the previous attempt elided] for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.407243 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.407268 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.407276 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.407288 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.407299 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:08:59Z","lastTransitionTime":"2025-11-28T11:08:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 11:08:59 crc kubenswrapper[4923]: E1128 11:08:59.414119 4923 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status [status patch payload identical to the previous attempt elided] for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.416875 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h"
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.416946 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.416982 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.416992 4923 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.417000 4923 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.417011 4923 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.417019 4923 reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.417027 4923 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.417035 4923 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.417043 4923 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.417051 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.417060 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.417068 4923 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.417077 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.417085 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.417094 4923 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.417103 4923 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.417111 4923 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.417118 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.417127 4923 reconciler_common.go:293] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.417135 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.417142 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.417150 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.417158 4923 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.417165 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.417174 4923 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.417183 4923 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.417190 4923 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.417198 4923 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.417206 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.417214 4923 reconciler_common.go:293] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.417222 4923 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.417230 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.417238 4923 reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.417246 4923 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.417255 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.417263 4923 reconciler_common.go:293] "Volume detached for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.417271 4923 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.417280 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.417289 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.417297 4923 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.417305 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.417313 4923 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.417320 4923 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.417329 4923 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.417337 4923 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.417345 4923 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.417354 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.417363 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.417383 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.417393 4923 reconciler_common.go:293] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.417400 4923 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.417408 4923 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.417416 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.417423 4923 reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.417431 4923 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.417439 4923 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.417446 4923 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.417454 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.417462 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.417470 4923 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.417478 4923 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.417486 4923 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.417495 4923 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.417503 4923 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.417511 4923 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.417518 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.417526 4923 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.417533 4923 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.417540 4923 reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.417540 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h"
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.417548 4923 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.417659 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.417665 4923 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.417678 4923 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.417688 4923 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.417696 4923 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.417704 4923 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.417736 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.417747 4923 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.417756 4923 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.417764 4923 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.417772 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.417783 4923 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath \"\""
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.417828 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.417846 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.417871 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.417884 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.417892 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:08:59Z","lastTransitionTime":"2025-11-28T11:08:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.428778 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.440955 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 28 11:08:59 crc kubenswrapper[4923]: E1128 11:08:59.445320 4923 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f69ffe27-00d5-45aa-bb63-00075a21e0c7\\\",\\\"systemUUID\\\":\\\"bb6b4e53-d23a-4517-9d50-b05bdc3da8e4\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.449269 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.450744 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.450764 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.450771 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.450784 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.450792 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:08:59Z","lastTransitionTime":"2025-11-28T11:08:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:08:59 crc kubenswrapper[4923]: E1128 11:08:59.471872 4923 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f69ffe27-00d5-45aa-bb63-00075a21e0c7\\\",\\\"systemUUID\\\":\\\"bb6b4e53-d23a-4517-9d50-b05bdc3da8e4\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 11:08:59 crc kubenswrapper[4923]: E1128 11:08:59.471993 4923 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 28 11:08:59 crc kubenswrapper[4923]: W1128 11:08:59.481081 4923 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podef543e1b_8068_4ea3_b32a_61027b32e95d.slice/crio-3f6abe9f311f88ea2247db6bf772e5f4cf056a3fa1b3077acbf32cba903c23de WatchSource:0}: Error finding container 3f6abe9f311f88ea2247db6bf772e5f4cf056a3fa1b3077acbf32cba903c23de: Status 404 returned error can't find the container with id 3f6abe9f311f88ea2247db6bf772e5f4cf056a3fa1b3077acbf32cba903c23de Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.481255 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.481277 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.481306 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.481321 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.481330 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:08:59Z","lastTransitionTime":"2025-11-28T11:08:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.584075 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.584280 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.584288 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.584302 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.584316 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:08:59Z","lastTransitionTime":"2025-11-28T11:08:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.686608 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.686647 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.686655 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.686671 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.686679 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:08:59Z","lastTransitionTime":"2025-11-28T11:08:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.788530 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.788571 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.788580 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.788595 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.788605 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:08:59Z","lastTransitionTime":"2025-11-28T11:08:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.820040 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.820113 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.820131 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 11:08:59 crc kubenswrapper[4923]: E1128 11:08:59.820207 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 11:09:00.820180697 +0000 UTC m=+19.948864907 (durationBeforeRetry 1s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 11:08:59 crc kubenswrapper[4923]: E1128 11:08:59.820230 4923 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 11:08:59 crc kubenswrapper[4923]: E1128 11:08:59.820271 4923 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 11:08:59 crc kubenswrapper[4923]: E1128 11:08:59.820281 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 11:09:00.82026841 +0000 UTC m=+19.948952610 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 11:08:59 crc kubenswrapper[4923]: E1128 11:08:59.820407 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 11:09:00.820367942 +0000 UTC m=+19.949052152 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.890519 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.890555 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.890563 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.890578 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.890587 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:08:59Z","lastTransitionTime":"2025-11-28T11:08:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.921171 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.921218 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 11:08:59 crc kubenswrapper[4923]: E1128 11:08:59.921331 4923 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 11:08:59 crc kubenswrapper[4923]: E1128 11:08:59.921347 4923 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 11:08:59 crc kubenswrapper[4923]: E1128 11:08:59.921359 4923 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 11:08:59 crc kubenswrapper[4923]: E1128 11:08:59.921380 4923 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 11:08:59 crc kubenswrapper[4923]: E1128 11:08:59.921402 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-28 11:09:00.921390429 +0000 UTC m=+20.050074639 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 11:08:59 crc kubenswrapper[4923]: E1128 11:08:59.921407 4923 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 11:08:59 crc kubenswrapper[4923]: E1128 11:08:59.921420 4923 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 11:08:59 crc kubenswrapper[4923]: E1128 11:08:59.921475 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-28 11:09:00.921460701 +0000 UTC m=+20.050144911 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.992971 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.993008 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.993016 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.993030 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:08:59 crc kubenswrapper[4923]: I1128 11:08:59.993042 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:08:59Z","lastTransitionTime":"2025-11-28T11:08:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.038768 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/node-resolver-766k2"] Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.039330 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/node-resolver-766k2" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.040995 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.041354 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.041761 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.055141 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.067689 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.095596 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.095663 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.095679 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.095704 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.095721 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:00Z","lastTransitionTime":"2025-11-28T11:09:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.111971 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.122584 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dnr6k\" (UniqueName: \"kubernetes.io/projected/69fcf39a-3416-4733-a55a-043d5286f8ac-kube-api-access-dnr6k\") pod \"node-resolver-766k2\" (UID: \"69fcf39a-3416-4733-a55a-043d5286f8ac\") " pod="openshift-dns/node-resolver-766k2" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.122631 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/69fcf39a-3416-4733-a55a-043d5286f8ac-hosts-file\") pod \"node-resolver-766k2\" (UID: \"69fcf39a-3416-4733-a55a-043d5286f8ac\") " pod="openshift-dns/node-resolver-766k2" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.141307 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.158750 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.169604 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.174361 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.186699 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:00Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.198176 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.198230 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.198243 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.198267 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.198282 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:00Z","lastTransitionTime":"2025-11-28T11:09:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.199539 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-766k2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69fcf39a-3416-4733-a55a-043d5286f8ac\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dnr6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-766k2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:00Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.215718 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-766k2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"69fcf39a-3416-4733-a55a-043d5286f8ac\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dnr6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-766k2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:00Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.223027 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/69fcf39a-3416-4733-a55a-043d5286f8ac-hosts-file\") pod \"node-resolver-766k2\" (UID: \"69fcf39a-3416-4733-a55a-043d5286f8ac\") " pod="openshift-dns/node-resolver-766k2" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.223063 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dnr6k\" (UniqueName: \"kubernetes.io/projected/69fcf39a-3416-4733-a55a-043d5286f8ac-kube-api-access-dnr6k\") pod \"node-resolver-766k2\" (UID: \"69fcf39a-3416-4733-a55a-043d5286f8ac\") " pod="openshift-dns/node-resolver-766k2" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.223341 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/69fcf39a-3416-4733-a55a-043d5286f8ac-hosts-file\") pod \"node-resolver-766k2\" (UID: \"69fcf39a-3416-4733-a55a-043d5286f8ac\") " 
pod="openshift-dns/node-resolver-766k2" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.247314 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dnr6k\" (UniqueName: \"kubernetes.io/projected/69fcf39a-3416-4733-a55a-043d5286f8ac-kube-api-access-dnr6k\") pod \"node-resolver-766k2\" (UID: \"69fcf39a-3416-4733-a55a-043d5286f8ac\") " pod="openshift-dns/node-resolver-766k2" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.249126 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:00Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.264540 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:00Z is after 2025-08-24T17:21:41Z"
Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.277923 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"3c1e1dcf5efd54a3e3546460813ddc68dae027e669a19eeef6af7246b385ed21"}
Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.278181 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"f499a06d56334e94d7afa16c404e6bec2654bf3bcf308f027a914e2eab37a83a"}
Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.279868 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log"
Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.281703 4923 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="fc06f87c8ea0744810e2b9cb7ff8bb529fc1b2133ab79d12eb8e6129accd3e18" exitCode=255
Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.281764 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"fc06f87c8ea0744810e2b9cb7ff8bb529fc1b2133ab79d12eb8e6129accd3e18"}
Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.283638 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"bdcd87eab93f0216a48bbd6038ca2bc510b7b36f895bf66de15084be62a9a0e2"}
Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.283702 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"fa3a1d3e4297edce49cfd44925fbd1cb0d51752581df9a406042cc1da6f87121"}
Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.283732 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb"
event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"3f6abe9f311f88ea2247db6bf772e5f4cf056a3fa1b3077acbf32cba903c23de"} Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.284527 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"3a7f2cb371bb0466051005e67fd1cc5b4dfac81cb95658421ce03981dac71d90"} Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.292485 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:00Z is after 2025-08-24T17:21:41Z"
Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.301031 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.301072 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.301084 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.301101 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.301112 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:00Z","lastTransitionTime":"2025-11-28T11:09:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.310214 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:00Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.316396 4923 scope.go:117] "RemoveContainer" containerID="fc06f87c8ea0744810e2b9cb7ff8bb529fc1b2133ab79d12eb8e6129accd3e18" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.316637 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/kube-controller-manager-crc"] Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.316901 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.352108 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/node-resolver-766k2" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.352144 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:00Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.369901 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:00Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:00 crc kubenswrapper[4923]: W1128 11:09:00.375841 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod69fcf39a_3416_4733_a55a_043d5286f8ac.slice/crio-41b5dba3d1dee38479cd8e953f9e002b43890a2ef83b2277aed281cfecbad2a8 WatchSource:0}: Error finding container 41b5dba3d1dee38479cd8e953f9e002b43890a2ef83b2277aed281cfecbad2a8: Status 404 returned error can't find the container with id 41b5dba3d1dee38479cd8e953f9e002b43890a2ef83b2277aed281cfecbad2a8 Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.390453 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c83fada-ddb5-4acd-99c4-74d9f42e6250\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eece6b2154126c64202c6cb5a8b2953275ed2dc75e76fef6aaf2c4b82a1979f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28093276aebb4751d979649c4ced86f500308d0d4dde397771c0e1e968250ec8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28ae91e6197ea506c337abdbce14a048856e6bda9b35c5de922904c26bc96a54\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc06f87c8ea0744810e2b9cb7ff8bb529fc1b2133ab79d12eb8e6129accd3e18\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc06f87c8ea0744810e2b9cb7ff8bb529fc1b2133ab79d12eb8e6129accd3e18\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28
T11:08:59Z\\\",\\\"message\\\":\\\"-12-28 11:08:43 +0000 UTC (now=2025-11-28 11:08:59.275700323 +0000 UTC))\\\\\\\"\\\\nI1128 11:08:59.275749 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1128 11:08:59.275786 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1128 11:08:59.275797 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI1128 11:08:59.275809 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1128 11:08:59.275835 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1128 11:08:59.275852 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764328134\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764328133\\\\\\\\\\\\\\\" (2025-11-28 10:08:53 +0000 UTC to 2026-11-28 10:08:53 +0000 UTC (now=2025-11-28 11:08:59.275832266 +0000 UTC))\\\\\\\"\\\\nI1128 11:08:59.275869 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1128 11:08:59.275889 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1128 11:08:59.275902 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1128 11:08:59.275909 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1128 11:08:59.275921 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1128 11:08:59.275909 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2723273528/tls.crt::/tmp/serving-cert-2723273528/tls.key\\\\\\\"\\\\nF1128 11:08:59.278169 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c6f085f1fd5a1ed6abe0727d6a94c95fb1b97a9f00a0dc157f62f68698c25ba9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:00Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.405527 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.405553 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.405561 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.405573 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.405582 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:00Z","lastTransitionTime":"2025-11-28T11:09:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady 
message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.408009 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-h5s2m"] Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.408434 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-daemon-bwdth"] Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.408751 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-additional-cni-plugins-9gjj9"] Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.409110 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-h5s2m" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.409245 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.409775 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-9gjj9" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.411344 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c1e1dcf5efd54a3e3546460813ddc68dae027e669a19eeef6af7246b385ed21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:00Z is after 2025-08-24T17:21:41Z" 
Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.411645 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources"
Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.413276 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt"
Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.413433 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist"
Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.413481 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config"
Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.413514 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy"
Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.413639 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt"
Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.413987 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt"
Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.414238 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls"
Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.414326 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz"
Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.414345 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq"
Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.415735 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt"
Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.417102 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6"
Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.439899 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdcd87eab93f0216a48bbd6038ca2bc510b7b36f895bf66de15084be62a9a0e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa3a1d3e4297edce49cfd44925fbd1cb0d51752581df9a406042cc1da6f87121\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:00Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.457757 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:00Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.474204 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:00Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.488752 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:00Z is after 2025-08-24T17:21:41Z"
Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.507419 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.507447 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.507457 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.507471 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.507481 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:00Z","lastTransitionTime":"2025-11-28T11:09:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.521784 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:00Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.526019 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nthb2\" (UniqueName: \"kubernetes.io/projected/092566f7-fc7d-4897-a1f2-4ecedcd3058e-kube-api-access-nthb2\") pod \"machine-config-daemon-bwdth\" (UID: \"092566f7-fc7d-4897-a1f2-4ecedcd3058e\") " pod="openshift-machine-config-operator/machine-config-daemon-bwdth" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.526049 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/092566f7-fc7d-4897-a1f2-4ecedcd3058e-proxy-tls\") pod \"machine-config-daemon-bwdth\" (UID: \"092566f7-fc7d-4897-a1f2-4ecedcd3058e\") " pod="openshift-machine-config-operator/machine-config-daemon-bwdth" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.526070 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/84374038-67ce-4dc0-a2c2-6eed9650c604-hostroot\") pod 
\"multus-h5s2m\" (UID: \"84374038-67ce-4dc0-a2c2-6eed9650c604\") " pod="openshift-multus/multus-h5s2m" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.526086 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/84374038-67ce-4dc0-a2c2-6eed9650c604-cni-binary-copy\") pod \"multus-h5s2m\" (UID: \"84374038-67ce-4dc0-a2c2-6eed9650c604\") " pod="openshift-multus/multus-h5s2m" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.526101 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093-cni-binary-copy\") pod \"multus-additional-cni-plugins-9gjj9\" (UID: \"b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093\") " pod="openshift-multus/multus-additional-cni-plugins-9gjj9" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.526130 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093-os-release\") pod \"multus-additional-cni-plugins-9gjj9\" (UID: \"b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093\") " pod="openshift-multus/multus-additional-cni-plugins-9gjj9" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.526145 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/092566f7-fc7d-4897-a1f2-4ecedcd3058e-mcd-auth-proxy-config\") pod \"machine-config-daemon-bwdth\" (UID: \"092566f7-fc7d-4897-a1f2-4ecedcd3058e\") " pod="openshift-machine-config-operator/machine-config-daemon-bwdth" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.526160 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/84374038-67ce-4dc0-a2c2-6eed9650c604-cnibin\") pod \"multus-h5s2m\" (UID: \"84374038-67ce-4dc0-a2c2-6eed9650c604\") " pod="openshift-multus/multus-h5s2m" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.526182 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4j55d\" (UniqueName: \"kubernetes.io/projected/b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093-kube-api-access-4j55d\") pod \"multus-additional-cni-plugins-9gjj9\" (UID: \"b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093\") " pod="openshift-multus/multus-additional-cni-plugins-9gjj9" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.526196 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/092566f7-fc7d-4897-a1f2-4ecedcd3058e-rootfs\") pod \"machine-config-daemon-bwdth\" (UID: \"092566f7-fc7d-4897-a1f2-4ecedcd3058e\") " pod="openshift-machine-config-operator/machine-config-daemon-bwdth" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.526212 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/84374038-67ce-4dc0-a2c2-6eed9650c604-host-run-k8s-cni-cncf-io\") pod \"multus-h5s2m\" (UID: \"84374038-67ce-4dc0-a2c2-6eed9650c604\") " pod="openshift-multus/multus-h5s2m" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.526226 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/84374038-67ce-4dc0-a2c2-6eed9650c604-multus-conf-dir\") pod \"multus-h5s2m\" (UID: \"84374038-67ce-4dc0-a2c2-6eed9650c604\") " pod="openshift-multus/multus-h5s2m" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.526242 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093-system-cni-dir\") pod \"multus-additional-cni-plugins-9gjj9\" (UID: \"b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093\") " pod="openshift-multus/multus-additional-cni-plugins-9gjj9" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.526256 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/84374038-67ce-4dc0-a2c2-6eed9650c604-system-cni-dir\") pod \"multus-h5s2m\" (UID: \"84374038-67ce-4dc0-a2c2-6eed9650c604\") " pod="openshift-multus/multus-h5s2m" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.526270 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/84374038-67ce-4dc0-a2c2-6eed9650c604-host-var-lib-cni-bin\") pod \"multus-h5s2m\" (UID: \"84374038-67ce-4dc0-a2c2-6eed9650c604\") " pod="openshift-multus/multus-h5s2m" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.526287 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/84374038-67ce-4dc0-a2c2-6eed9650c604-multus-socket-dir-parent\") pod \"multus-h5s2m\" (UID: \"84374038-67ce-4dc0-a2c2-6eed9650c604\") " pod="openshift-multus/multus-h5s2m" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.526301 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/84374038-67ce-4dc0-a2c2-6eed9650c604-host-run-multus-certs\") pod \"multus-h5s2m\" (UID: \"84374038-67ce-4dc0-a2c2-6eed9650c604\") " pod="openshift-multus/multus-h5s2m" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.526316 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/84374038-67ce-4dc0-a2c2-6eed9650c604-etc-kubernetes\") pod \"multus-h5s2m\" (UID: \"84374038-67ce-4dc0-a2c2-6eed9650c604\") " pod="openshift-multus/multus-h5s2m" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.526331 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/84374038-67ce-4dc0-a2c2-6eed9650c604-multus-cni-dir\") pod \"multus-h5s2m\" (UID: \"84374038-67ce-4dc0-a2c2-6eed9650c604\") " pod="openshift-multus/multus-h5s2m" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.526344 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/84374038-67ce-4dc0-a2c2-6eed9650c604-os-release\") pod \"multus-h5s2m\" (UID: \"84374038-67ce-4dc0-a2c2-6eed9650c604\") " pod="openshift-multus/multus-h5s2m" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.526365 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-9gjj9\" (UID: \"b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093\") " pod="openshift-multus/multus-additional-cni-plugins-9gjj9" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.526379 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/84374038-67ce-4dc0-a2c2-6eed9650c604-host-var-lib-cni-multus\") pod \"multus-h5s2m\" (UID: \"84374038-67ce-4dc0-a2c2-6eed9650c604\") " pod="openshift-multus/multus-h5s2m" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.526413 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093-tuning-conf-dir\") pod \"multus-additional-cni-plugins-9gjj9\" (UID: \"b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093\") " pod="openshift-multus/multus-additional-cni-plugins-9gjj9" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.526430 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/84374038-67ce-4dc0-a2c2-6eed9650c604-host-var-lib-kubelet\") pod \"multus-h5s2m\" (UID: \"84374038-67ce-4dc0-a2c2-6eed9650c604\") " pod="openshift-multus/multus-h5s2m" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.526447 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/84374038-67ce-4dc0-a2c2-6eed9650c604-host-run-netns\") pod \"multus-h5s2m\" (UID: \"84374038-67ce-4dc0-a2c2-6eed9650c604\") " pod="openshift-multus/multus-h5s2m" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.526461 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/84374038-67ce-4dc0-a2c2-6eed9650c604-multus-daemon-config\") pod \"multus-h5s2m\" (UID: \"84374038-67ce-4dc0-a2c2-6eed9650c604\") " pod="openshift-multus/multus-h5s2m" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.526475 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8z7ts\" (UniqueName: \"kubernetes.io/projected/84374038-67ce-4dc0-a2c2-6eed9650c604-kube-api-access-8z7ts\") pod \"multus-h5s2m\" (UID: \"84374038-67ce-4dc0-a2c2-6eed9650c604\") " pod="openshift-multus/multus-h5s2m" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.526490 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093-cnibin\") pod \"multus-additional-cni-plugins-9gjj9\" (UID: \"b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093\") " pod="openshift-multus/multus-additional-cni-plugins-9gjj9" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.549771 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-766k2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"69fcf39a-3416-4733-a55a-043d5286f8ac\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dnr6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-766k2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:00Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.572001 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"faf07f1a-1aa1-4e4a-b93d-739f0a9f1012\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f7b3757e1d1a5295909db644a475e35e9f9826cd7382a5a3eba86b4a76ac04d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f83e92b35264fccdd516d857e5a574a7156f7615b643691b6f8694daa38089b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8841f44f1d4af0e73960ce1c7ac5a4da352f85f6b3637315faa716d853be3277\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc960423fd7ee0a6231020982f5b932a6a2d7d0515d6f6df503d6c5d51b82096\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:00Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.589296 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-766k2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69fcf39a-3416-4733-a55a-043d5286f8ac\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dnr6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-766k2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:00Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.617045 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.617074 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.617082 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.617094 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.617103 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:00Z","lastTransitionTime":"2025-11-28T11:09:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.627771 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/092566f7-fc7d-4897-a1f2-4ecedcd3058e-proxy-tls\") pod \"machine-config-daemon-bwdth\" (UID: \"092566f7-fc7d-4897-a1f2-4ecedcd3058e\") " pod="openshift-machine-config-operator/machine-config-daemon-bwdth" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.627801 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/84374038-67ce-4dc0-a2c2-6eed9650c604-hostroot\") pod \"multus-h5s2m\" (UID: \"84374038-67ce-4dc0-a2c2-6eed9650c604\") " pod="openshift-multus/multus-h5s2m" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.627818 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/84374038-67ce-4dc0-a2c2-6eed9650c604-cni-binary-copy\") pod \"multus-h5s2m\" (UID: \"84374038-67ce-4dc0-a2c2-6eed9650c604\") " pod="openshift-multus/multus-h5s2m" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.627838 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093-os-release\") pod \"multus-additional-cni-plugins-9gjj9\" (UID: \"b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093\") " pod="openshift-multus/multus-additional-cni-plugins-9gjj9" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.627855 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093-cni-binary-copy\") pod \"multus-additional-cni-plugins-9gjj9\" (UID: \"b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093\") " pod="openshift-multus/multus-additional-cni-plugins-9gjj9" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.627880 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/092566f7-fc7d-4897-a1f2-4ecedcd3058e-mcd-auth-proxy-config\") pod \"machine-config-daemon-bwdth\" (UID: \"092566f7-fc7d-4897-a1f2-4ecedcd3058e\") " pod="openshift-machine-config-operator/machine-config-daemon-bwdth" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.627896 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/84374038-67ce-4dc0-a2c2-6eed9650c604-cnibin\") pod \"multus-h5s2m\" (UID: \"84374038-67ce-4dc0-a2c2-6eed9650c604\") " pod="openshift-multus/multus-h5s2m" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.627915 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/84374038-67ce-4dc0-a2c2-6eed9650c604-multus-conf-dir\") pod \"multus-h5s2m\" (UID: \"84374038-67ce-4dc0-a2c2-6eed9650c604\") " pod="openshift-multus/multus-h5s2m" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.627943 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4j55d\" (UniqueName: \"kubernetes.io/projected/b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093-kube-api-access-4j55d\") pod \"multus-additional-cni-plugins-9gjj9\" (UID: \"b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093\") " pod="openshift-multus/multus-additional-cni-plugins-9gjj9" Nov 28 11:09:00 crc 
kubenswrapper[4923]: I1128 11:09:00.627958 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/092566f7-fc7d-4897-a1f2-4ecedcd3058e-rootfs\") pod \"machine-config-daemon-bwdth\" (UID: \"092566f7-fc7d-4897-a1f2-4ecedcd3058e\") " pod="openshift-machine-config-operator/machine-config-daemon-bwdth" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.627975 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/84374038-67ce-4dc0-a2c2-6eed9650c604-host-run-k8s-cni-cncf-io\") pod \"multus-h5s2m\" (UID: \"84374038-67ce-4dc0-a2c2-6eed9650c604\") " pod="openshift-multus/multus-h5s2m" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.627990 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093-system-cni-dir\") pod \"multus-additional-cni-plugins-9gjj9\" (UID: \"b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093\") " pod="openshift-multus/multus-additional-cni-plugins-9gjj9" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.628005 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/84374038-67ce-4dc0-a2c2-6eed9650c604-system-cni-dir\") pod \"multus-h5s2m\" (UID: \"84374038-67ce-4dc0-a2c2-6eed9650c604\") " pod="openshift-multus/multus-h5s2m" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.628019 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/84374038-67ce-4dc0-a2c2-6eed9650c604-host-var-lib-cni-bin\") pod \"multus-h5s2m\" (UID: \"84374038-67ce-4dc0-a2c2-6eed9650c604\") " pod="openshift-multus/multus-h5s2m" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.628033 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/84374038-67ce-4dc0-a2c2-6eed9650c604-multus-socket-dir-parent\") pod \"multus-h5s2m\" (UID: \"84374038-67ce-4dc0-a2c2-6eed9650c604\") " pod="openshift-multus/multus-h5s2m" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.628049 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/84374038-67ce-4dc0-a2c2-6eed9650c604-host-run-multus-certs\") pod \"multus-h5s2m\" (UID: \"84374038-67ce-4dc0-a2c2-6eed9650c604\") " pod="openshift-multus/multus-h5s2m" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.628063 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/84374038-67ce-4dc0-a2c2-6eed9650c604-etc-kubernetes\") pod \"multus-h5s2m\" (UID: \"84374038-67ce-4dc0-a2c2-6eed9650c604\") " pod="openshift-multus/multus-h5s2m" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.628077 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/84374038-67ce-4dc0-a2c2-6eed9650c604-os-release\") pod \"multus-h5s2m\" (UID: \"84374038-67ce-4dc0-a2c2-6eed9650c604\") " pod="openshift-multus/multus-h5s2m" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.628094 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" 
(UniqueName: \"kubernetes.io/host-path/84374038-67ce-4dc0-a2c2-6eed9650c604-multus-cni-dir\") pod \"multus-h5s2m\" (UID: \"84374038-67ce-4dc0-a2c2-6eed9650c604\") " pod="openshift-multus/multus-h5s2m" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.628115 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-9gjj9\" (UID: \"b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093\") " pod="openshift-multus/multus-additional-cni-plugins-9gjj9" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.628129 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/84374038-67ce-4dc0-a2c2-6eed9650c604-host-var-lib-cni-multus\") pod \"multus-h5s2m\" (UID: \"84374038-67ce-4dc0-a2c2-6eed9650c604\") " pod="openshift-multus/multus-h5s2m" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.628145 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/84374038-67ce-4dc0-a2c2-6eed9650c604-host-var-lib-kubelet\") pod \"multus-h5s2m\" (UID: \"84374038-67ce-4dc0-a2c2-6eed9650c604\") " pod="openshift-multus/multus-h5s2m" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.628160 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093-tuning-conf-dir\") pod \"multus-additional-cni-plugins-9gjj9\" (UID: \"b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093\") " pod="openshift-multus/multus-additional-cni-plugins-9gjj9" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.628174 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/84374038-67ce-4dc0-a2c2-6eed9650c604-host-run-netns\") pod \"multus-h5s2m\" (UID: \"84374038-67ce-4dc0-a2c2-6eed9650c604\") " pod="openshift-multus/multus-h5s2m" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.628189 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/84374038-67ce-4dc0-a2c2-6eed9650c604-multus-daemon-config\") pod \"multus-h5s2m\" (UID: \"84374038-67ce-4dc0-a2c2-6eed9650c604\") " pod="openshift-multus/multus-h5s2m" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.628203 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8z7ts\" (UniqueName: \"kubernetes.io/projected/84374038-67ce-4dc0-a2c2-6eed9650c604-kube-api-access-8z7ts\") pod \"multus-h5s2m\" (UID: \"84374038-67ce-4dc0-a2c2-6eed9650c604\") " pod="openshift-multus/multus-h5s2m" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.628218 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093-cnibin\") pod \"multus-additional-cni-plugins-9gjj9\" (UID: \"b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093\") " pod="openshift-multus/multus-additional-cni-plugins-9gjj9" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.628233 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nthb2\" (UniqueName: 
\"kubernetes.io/projected/092566f7-fc7d-4897-a1f2-4ecedcd3058e-kube-api-access-nthb2\") pod \"machine-config-daemon-bwdth\" (UID: \"092566f7-fc7d-4897-a1f2-4ecedcd3058e\") " pod="openshift-machine-config-operator/machine-config-daemon-bwdth" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.629003 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdcd87eab93f0216a48bbd6038ca2bc510b7b36f895bf66de15084be62a9a0e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa3a1d3e4297edce49cfd44925fbd1cb0d51752581df9a406042cc1da6f87121\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:00Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:00 crc 
kubenswrapper[4923]: I1128 11:09:00.629124 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/84374038-67ce-4dc0-a2c2-6eed9650c604-host-var-lib-cni-bin\") pod \"multus-h5s2m\" (UID: \"84374038-67ce-4dc0-a2c2-6eed9650c604\") " pod="openshift-multus/multus-h5s2m" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.629160 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/84374038-67ce-4dc0-a2c2-6eed9650c604-hostroot\") pod \"multus-h5s2m\" (UID: \"84374038-67ce-4dc0-a2c2-6eed9650c604\") " pod="openshift-multus/multus-h5s2m" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.629332 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/84374038-67ce-4dc0-a2c2-6eed9650c604-multus-conf-dir\") pod \"multus-h5s2m\" (UID: \"84374038-67ce-4dc0-a2c2-6eed9650c604\") " pod="openshift-multus/multus-h5s2m" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.629540 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093-os-release\") pod \"multus-additional-cni-plugins-9gjj9\" (UID: \"b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093\") " pod="openshift-multus/multus-additional-cni-plugins-9gjj9" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.629688 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/84374038-67ce-4dc0-a2c2-6eed9650c604-cni-binary-copy\") pod \"multus-h5s2m\" (UID: \"84374038-67ce-4dc0-a2c2-6eed9650c604\") " pod="openshift-multus/multus-h5s2m" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.629863 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/092566f7-fc7d-4897-a1f2-4ecedcd3058e-rootfs\") pod \"machine-config-daemon-bwdth\" (UID: \"092566f7-fc7d-4897-a1f2-4ecedcd3058e\") " pod="openshift-machine-config-operator/machine-config-daemon-bwdth" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.629901 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/84374038-67ce-4dc0-a2c2-6eed9650c604-host-run-k8s-cni-cncf-io\") pod \"multus-h5s2m\" (UID: \"84374038-67ce-4dc0-a2c2-6eed9650c604\") " pod="openshift-multus/multus-h5s2m" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.629954 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093-system-cni-dir\") pod \"multus-additional-cni-plugins-9gjj9\" (UID: \"b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093\") " pod="openshift-multus/multus-additional-cni-plugins-9gjj9" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.629997 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/84374038-67ce-4dc0-a2c2-6eed9650c604-system-cni-dir\") pod \"multus-h5s2m\" (UID: \"84374038-67ce-4dc0-a2c2-6eed9650c604\") " pod="openshift-multus/multus-h5s2m" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.630028 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: 
\"kubernetes.io/host-path/84374038-67ce-4dc0-a2c2-6eed9650c604-host-var-lib-cni-multus\") pod \"multus-h5s2m\" (UID: \"84374038-67ce-4dc0-a2c2-6eed9650c604\") " pod="openshift-multus/multus-h5s2m" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.630176 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/84374038-67ce-4dc0-a2c2-6eed9650c604-multus-socket-dir-parent\") pod \"multus-h5s2m\" (UID: \"84374038-67ce-4dc0-a2c2-6eed9650c604\") " pod="openshift-multus/multus-h5s2m" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.630217 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/84374038-67ce-4dc0-a2c2-6eed9650c604-host-run-multus-certs\") pod \"multus-h5s2m\" (UID: \"84374038-67ce-4dc0-a2c2-6eed9650c604\") " pod="openshift-multus/multus-h5s2m" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.630248 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/84374038-67ce-4dc0-a2c2-6eed9650c604-etc-kubernetes\") pod \"multus-h5s2m\" (UID: \"84374038-67ce-4dc0-a2c2-6eed9650c604\") " pod="openshift-multus/multus-h5s2m" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.630294 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/84374038-67ce-4dc0-a2c2-6eed9650c604-os-release\") pod \"multus-h5s2m\" (UID: \"84374038-67ce-4dc0-a2c2-6eed9650c604\") " pod="openshift-multus/multus-h5s2m" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.630419 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/84374038-67ce-4dc0-a2c2-6eed9650c604-multus-cni-dir\") pod \"multus-h5s2m\" (UID: \"84374038-67ce-4dc0-a2c2-6eed9650c604\") " pod="openshift-multus/multus-h5s2m" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.630837 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093-cni-binary-copy\") pod \"multus-additional-cni-plugins-9gjj9\" (UID: \"b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093\") " pod="openshift-multus/multus-additional-cni-plugins-9gjj9" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.630856 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/092566f7-fc7d-4897-a1f2-4ecedcd3058e-proxy-tls\") pod \"machine-config-daemon-bwdth\" (UID: \"092566f7-fc7d-4897-a1f2-4ecedcd3058e\") " pod="openshift-machine-config-operator/machine-config-daemon-bwdth" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.630875 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/84374038-67ce-4dc0-a2c2-6eed9650c604-host-var-lib-kubelet\") pod \"multus-h5s2m\" (UID: \"84374038-67ce-4dc0-a2c2-6eed9650c604\") " pod="openshift-multus/multus-h5s2m" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.631011 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/84374038-67ce-4dc0-a2c2-6eed9650c604-cnibin\") pod \"multus-h5s2m\" (UID: \"84374038-67ce-4dc0-a2c2-6eed9650c604\") " pod="openshift-multus/multus-h5s2m" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 
11:09:00.631165 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/84374038-67ce-4dc0-a2c2-6eed9650c604-host-run-netns\") pod \"multus-h5s2m\" (UID: \"84374038-67ce-4dc0-a2c2-6eed9650c604\") " pod="openshift-multus/multus-h5s2m" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.631237 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093-cnibin\") pod \"multus-additional-cni-plugins-9gjj9\" (UID: \"b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093\") " pod="openshift-multus/multus-additional-cni-plugins-9gjj9" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.631490 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/84374038-67ce-4dc0-a2c2-6eed9650c604-multus-daemon-config\") pod \"multus-h5s2m\" (UID: \"84374038-67ce-4dc0-a2c2-6eed9650c604\") " pod="openshift-multus/multus-h5s2m" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.631611 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/092566f7-fc7d-4897-a1f2-4ecedcd3058e-mcd-auth-proxy-config\") pod \"machine-config-daemon-bwdth\" (UID: \"092566f7-fc7d-4897-a1f2-4ecedcd3058e\") " pod="openshift-machine-config-operator/machine-config-daemon-bwdth" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.631841 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-9gjj9\" (UID: \"b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093\") " pod="openshift-multus/multus-additional-cni-plugins-9gjj9" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.631902 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093-tuning-conf-dir\") pod \"multus-additional-cni-plugins-9gjj9\" (UID: \"b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093\") " pod="openshift-multus/multus-additional-cni-plugins-9gjj9" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.673547 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:00Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.674399 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4j55d\" (UniqueName: \"kubernetes.io/projected/b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093-kube-api-access-4j55d\") pod \"multus-additional-cni-plugins-9gjj9\" (UID: \"b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093\") " pod="openshift-multus/multus-additional-cni-plugins-9gjj9" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.680742 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nthb2\" (UniqueName: \"kubernetes.io/projected/092566f7-fc7d-4897-a1f2-4ecedcd3058e-kube-api-access-nthb2\") pod \"machine-config-daemon-bwdth\" (UID: \"092566f7-fc7d-4897-a1f2-4ecedcd3058e\") " pod="openshift-machine-config-operator/machine-config-daemon-bwdth" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.688024 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8z7ts\" (UniqueName: \"kubernetes.io/projected/84374038-67ce-4dc0-a2c2-6eed9650c604-kube-api-access-8z7ts\") pod \"multus-h5s2m\" (UID: \"84374038-67ce-4dc0-a2c2-6eed9650c604\") " pod="openshift-multus/multus-h5s2m" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.719521 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.719546 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.719553 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.719567 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.719575 4923 
setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:00Z","lastTransitionTime":"2025-11-28T11:09:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.726968 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-9gjj9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-9gjj9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:00Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.731795 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-h5s2m" Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.737788 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-bwdth"
Nov 28 11:09:00 crc kubenswrapper[4923]: W1128 11:09:00.740508 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod84374038_67ce_4dc0_a2c2_6eed9650c604.slice/crio-5a5b6641e1d3ea3e0c167ac6f6127bafc00719a4fe56359b1f067100d5cacc9e WatchSource:0}: Error finding container 5a5b6641e1d3ea3e0c167ac6f6127bafc00719a4fe56359b1f067100d5cacc9e: Status 404 returned error can't find the container with id 5a5b6641e1d3ea3e0c167ac6f6127bafc00719a4fe56359b1f067100d5cacc9e
Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.743611 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-9gjj9"
Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.775311 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"faf07f1a-1aa1-4e4a-b93d-739f0a9f1012\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f7b3757e1d1a5295909db644a475e35e9f9826cd7382a5a3eba86b4a76ac04d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f83e92b35264fccdd516d857e5a574a7156f7615b643691b6f8694daa38089b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8841f44f1d4af0e73960ce1c7ac5a4da352f85f6b3637315faa716d853be3277\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc960423fd7ee0a6231020982f5b932a6a2d7d0515d6f6df503d6c5d51b82096\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:00Z is after 2025-08-24T17:21:41Z"
Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.821418 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.821454 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.821463 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.821477 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.821486 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:00Z","lastTransitionTime":"2025-11-28T11:09:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.830749 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.830852 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.830896 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 11:09:00 crc kubenswrapper[4923]: E1128 11:09:00.831018 4923 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 28 11:09:00 crc kubenswrapper[4923]: E1128 11:09:00.831081 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 11:09:02.83106321 +0000 UTC m=+21.959747420 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 28 11:09:00 crc kubenswrapper[4923]: E1128 11:09:00.831381 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 11:09:02.831370449 +0000 UTC m=+21.960054669 (durationBeforeRetry 2s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 11:09:00 crc kubenswrapper[4923]: E1128 11:09:00.831428 4923 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Nov 28 11:09:00 crc kubenswrapper[4923]: E1128 11:09:00.831458 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 11:09:02.831449741 +0000 UTC m=+21.960133951 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.831633 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-h5s2m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"84374038-67ce-4dc0-a2c2-6eed9650c604\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8z7ts\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-h5s2m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:00Z is after 2025-08-24T17:21:41Z"
Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.858099 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-68dth"]
Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.859075 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-68dth"
Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.861300 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl"
Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.861508 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides"
Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.870790 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert"
Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.871098 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt"
Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.871320 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config"
Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.871587 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c83fada-ddb5-4acd-99c4-74d9f42e6250\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eece6b2154126c64202c6cb5a8b2953275ed2dc75e76fef6aaf2c4b82a1979f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28093276aebb4751d979649c4ced86f500308d0d4dde397771c0e1e968250ec8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28ae91e6197ea506c337abdbce14a048856e6bda9b35c5de922904c26bc96a54\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc06f87c8ea0744810e2b9cb7ff8bb529fc1b2133ab79d12eb8e6129accd3e18\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc06f87c8ea0744810e2b9cb7ff8bb529fc1b2133ab79d12eb8e6129accd3e18\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"-12-28 11:08:43 +0000 UTC (now=2025-11-28 11:08:59.275700323 +0000 UTC))\\\\\\\"\\\\nI1128 11:08:59.275749 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1128 11:08:59.275786 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1128 11:08:59.275797 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI1128 11:08:59.275809 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1128 11:08:59.275835 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1128 11:08:59.275852 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764328134\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764328133\\\\\\\\\\\\\\\" (2025-11-28 10:08:53 +0000 UTC to 2026-11-28 10:08:53 +0000 UTC (now=2025-11-28 11:08:59.275832266 +0000 UTC))\\\\\\\"\\\\nI1128 11:08:59.275869 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1128 11:08:59.275889 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1128 11:08:59.275902 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1128 11:08:59.275909 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1128 11:08:59.275921 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1128 11:08:59.275909 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2723273528/tls.crt::/tmp/serving-cert-2723273528/tls.key\\\\\\\"\\\\nF1128 11:08:59.278169 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c6f085f1fd5a1ed6abe0727d6a94c95fb1b97a9f00a0dc157f62f68698c25ba9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:00Z is after 2025-08-24T17:21:41Z"
Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.872368 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt"
Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.876709 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib"
Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.901278 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c1e1dcf5efd54a3e3546460813ddc68dae027e669a19eeef6af7246b385ed21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:00Z is after 2025-08-24T17:21:41Z"
Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.928520 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.928838 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.928846 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.928861 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.928871 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:00Z","lastTransitionTime":"2025-11-28T11:09:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.932104 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-host-kubelet\") pod \"ovnkube-node-68dth\" (UID: \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\") " pod="openshift-ovn-kubernetes/ovnkube-node-68dth"
Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.932147 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-ovnkube-script-lib\") pod \"ovnkube-node-68dth\" (UID: \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\") " pod="openshift-ovn-kubernetes/ovnkube-node-68dth"
Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.932167 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-run-openvswitch\") pod \"ovnkube-node-68dth\" (UID: \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\") " pod="openshift-ovn-kubernetes/ovnkube-node-68dth"
Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.932184 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-ovn-node-metrics-cert\") pod \"ovnkube-node-68dth\" (UID: \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\") " pod="openshift-ovn-kubernetes/ovnkube-node-68dth"
Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.932200 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-host-cni-netd\") pod \"ovnkube-node-68dth\" (UID: \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\") " pod="openshift-ovn-kubernetes/ovnkube-node-68dth"
Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.932215 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-env-overrides\") pod \"ovnkube-node-68dth\" (UID: \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\") " pod="openshift-ovn-kubernetes/ovnkube-node-68dth"
Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.932228 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-systemd-units\") pod \"ovnkube-node-68dth\" (UID: \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\") " pod="openshift-ovn-kubernetes/ovnkube-node-68dth"
Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.932243 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-run-ovn\") pod \"ovnkube-node-68dth\" (UID: \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\") " pod="openshift-ovn-kubernetes/ovnkube-node-68dth"
Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.932266 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-node-log\") pod \"ovnkube-node-68dth\" (UID: \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\") " pod="openshift-ovn-kubernetes/ovnkube-node-68dth"
Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.932279 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-host-slash\") pod \"ovnkube-node-68dth\" (UID: \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\") " pod="openshift-ovn-kubernetes/ovnkube-node-68dth"
Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.932294 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-etc-openvswitch\") pod \"ovnkube-node-68dth\" (UID: \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\") " pod="openshift-ovn-kubernetes/ovnkube-node-68dth"
Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.932313 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-host-cni-bin\") pod \"ovnkube-node-68dth\" (UID: \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\") " pod="openshift-ovn-kubernetes/ovnkube-node-68dth"
Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.932331 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-var-lib-openvswitch\") pod \"ovnkube-node-68dth\" (UID: \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\") " pod="openshift-ovn-kubernetes/ovnkube-node-68dth"
Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.932348 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qd9rd\" (UniqueName: \"kubernetes.io/projected/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-kube-api-access-qd9rd\") pod \"ovnkube-node-68dth\" (UID: \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\") " pod="openshift-ovn-kubernetes/ovnkube-node-68dth"
Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.932369 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-run-systemd\") pod \"ovnkube-node-68dth\" (UID: \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\") " pod="openshift-ovn-kubernetes/ovnkube-node-68dth"
Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.932384 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-host-run-ovn-kubernetes\") pod \"ovnkube-node-68dth\" (UID: \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\") " pod="openshift-ovn-kubernetes/ovnkube-node-68dth"
Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.932398 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-ovnkube-config\") pod \"ovnkube-node-68dth\" (UID: \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\") " pod="openshift-ovn-kubernetes/ovnkube-node-68dth"
Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.932418 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.932439 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-log-socket\") pod \"ovnkube-node-68dth\" (UID: \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\") " pod="openshift-ovn-kubernetes/ovnkube-node-68dth"
Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.932456 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-68dth\" (UID: \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\") " pod="openshift-ovn-kubernetes/ovnkube-node-68dth"
Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.932473 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.932487 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-host-run-netns\") pod \"ovnkube-node-68dth\" (UID: \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\") " pod="openshift-ovn-kubernetes/ovnkube-node-68dth"
Nov 28 11:09:00 crc kubenswrapper[4923]: E1128 11:09:00.932640 4923 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 28 11:09:00 crc kubenswrapper[4923]: E1128 11:09:00.932654 4923 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 28 11:09:00 crc kubenswrapper[4923]: E1128 11:09:00.932663 4923 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 28 11:09:00 crc kubenswrapper[4923]: E1128 11:09:00.932699 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-28 11:09:02.932686274 +0000 UTC m=+22.061370484 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 28 11:09:00 crc kubenswrapper[4923]: E1128 11:09:00.932745 4923 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 28 11:09:00 crc kubenswrapper[4923]: E1128 11:09:00.932755 4923 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 28 11:09:00 crc kubenswrapper[4923]: E1128 11:09:00.932762 4923 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 28 11:09:00 crc kubenswrapper[4923]: E1128 11:09:00.932780 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-28 11:09:02.932774916 +0000 UTC m=+22.061459116 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.946315 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:00Z is after 2025-08-24T17:21:41Z"
Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.970770 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:00Z is after 2025-08-24T17:21:41Z"
Nov 28 11:09:00 crc kubenswrapper[4923]: I1128 11:09:00.993313 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:00Z is after 2025-08-24T17:21:41Z"
Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.012868 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"092566f7-fc7d-4897-a1f2-4ecedcd3058e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nthb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nthb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-bwdth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:01Z is after 2025-08-24T17:21:41Z"
Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.031372 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.031402 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.031587 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.031600 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.031609 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:01Z","lastTransitionTime":"2025-11-28T11:09:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.032038 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdcd87eab93f0216a48bbd6038ca2bc510b7b36f895bf66de15084be62a9a0e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa3a1d3e4297edce49cfd44925fbd1cb0d51752581df9a406042cc1da6f87121\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:01Z is after 2025-08-24T17:21:41Z"
Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.033089 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-run-openvswitch\") pod \"ovnkube-node-68dth\" (UID: \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\") " pod="openshift-ovn-kubernetes/ovnkube-node-68dth"
Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.033115 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-ovn-node-metrics-cert\") pod \"ovnkube-node-68dth\" (UID: \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\") " pod="openshift-ovn-kubernetes/ovnkube-node-68dth"
Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.033135 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-host-cni-netd\") pod \"ovnkube-node-68dth\" (UID: \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\") " pod="openshift-ovn-kubernetes/ovnkube-node-68dth"
Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.033150 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-env-overrides\") pod \"ovnkube-node-68dth\" (UID: \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\") " pod="openshift-ovn-kubernetes/ovnkube-node-68dth"
Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.033172 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-systemd-units\") pod \"ovnkube-node-68dth\" (UID: \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\") " pod="openshift-ovn-kubernetes/ovnkube-node-68dth"
Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.033187 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-run-ovn\") pod \"ovnkube-node-68dth\" (UID: \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\") " pod="openshift-ovn-kubernetes/ovnkube-node-68dth"
Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.033183 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-run-openvswitch\") pod \"ovnkube-node-68dth\" (UID: \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\") " pod="openshift-ovn-kubernetes/ovnkube-node-68dth"
Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.033202 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-node-log\") pod \"ovnkube-node-68dth\" (UID: \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\") " pod="openshift-ovn-kubernetes/ovnkube-node-68dth"
Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.033237 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-node-log\") pod \"ovnkube-node-68dth\" (UID: \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\") " pod="openshift-ovn-kubernetes/ovnkube-node-68dth"
Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.033246 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-host-slash\") pod \"ovnkube-node-68dth\" (UID: \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\") " pod="openshift-ovn-kubernetes/ovnkube-node-68dth"
Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.033267 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-etc-openvswitch\") pod \"ovnkube-node-68dth\" (UID: \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\") " pod="openshift-ovn-kubernetes/ovnkube-node-68dth"
Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.033293 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-host-cni-bin\") pod \"ovnkube-node-68dth\" (UID: \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\") " pod="openshift-ovn-kubernetes/ovnkube-node-68dth"
Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.033345 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-var-lib-openvswitch\") pod \"ovnkube-node-68dth\" (UID: \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\") " pod="openshift-ovn-kubernetes/ovnkube-node-68dth"
Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.033363 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qd9rd\" (UniqueName: \"kubernetes.io/projected/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-kube-api-access-qd9rd\") pod \"ovnkube-node-68dth\" (UID: \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\") " pod="openshift-ovn-kubernetes/ovnkube-node-68dth"
Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.033380 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-run-systemd\") pod \"ovnkube-node-68dth\" (UID: \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\") " pod="openshift-ovn-kubernetes/ovnkube-node-68dth"
Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.033403 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-host-run-ovn-kubernetes\") pod \"ovnkube-node-68dth\" (UID: \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\") " pod="openshift-ovn-kubernetes/ovnkube-node-68dth"
Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.033419 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-ovnkube-config\") pod \"ovnkube-node-68dth\" (UID: \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\") " pod="openshift-ovn-kubernetes/ovnkube-node-68dth"
Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.033460 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-log-socket\") pod \"ovnkube-node-68dth\" (UID: \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\") " pod="openshift-ovn-kubernetes/ovnkube-node-68dth"
Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.033482 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-host-run-netns\") pod \"ovnkube-node-68dth\" (UID: \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\") " pod="openshift-ovn-kubernetes/ovnkube-node-68dth"
Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.033498 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-68dth\" (UID: \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\") " pod="openshift-ovn-kubernetes/ovnkube-node-68dth"
Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.033520 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-host-kubelet\") pod \"ovnkube-node-68dth\" (UID: \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\") " pod="openshift-ovn-kubernetes/ovnkube-node-68dth"
Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.033537 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-ovnkube-script-lib\") pod \"ovnkube-node-68dth\" (UID: \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\") " pod="openshift-ovn-kubernetes/ovnkube-node-68dth"
Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.033796 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-host-cni-netd\") pod \"ovnkube-node-68dth\" (UID: \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\") " pod="openshift-ovn-kubernetes/ovnkube-node-68dth"
Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.034031 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-run-systemd\") pod \"ovnkube-node-68dth\" (UID: \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\") " pod="openshift-ovn-kubernetes/ovnkube-node-68dth"
Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.034080 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-host-slash\") pod \"ovnkube-node-68dth\" (UID: \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\") " pod="openshift-ovn-kubernetes/ovnkube-node-68dth"
Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.034104 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-etc-openvswitch\") pod \"ovnkube-node-68dth\" (UID: \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\") " pod="openshift-ovn-kubernetes/ovnkube-node-68dth"
Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.034128 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-host-cni-bin\") pod \"ovnkube-node-68dth\" (UID: \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\") " pod="openshift-ovn-kubernetes/ovnkube-node-68dth"
Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.034151 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-var-lib-openvswitch\") pod \"ovnkube-node-68dth\" (UID: \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\") " pod="openshift-ovn-kubernetes/ovnkube-node-68dth"
Nov 28 11:09:01 crc kubenswrapper[4923]: I1128
11:09:01.034165 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-ovnkube-script-lib\") pod \"ovnkube-node-68dth\" (UID: \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\") " pod="openshift-ovn-kubernetes/ovnkube-node-68dth" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.034204 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-host-run-ovn-kubernetes\") pod \"ovnkube-node-68dth\" (UID: \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\") " pod="openshift-ovn-kubernetes/ovnkube-node-68dth" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.034356 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-host-run-netns\") pod \"ovnkube-node-68dth\" (UID: \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\") " pod="openshift-ovn-kubernetes/ovnkube-node-68dth" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.034390 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-log-socket\") pod \"ovnkube-node-68dth\" (UID: \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\") " pod="openshift-ovn-kubernetes/ovnkube-node-68dth" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.034416 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-68dth\" (UID: \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\") " pod="openshift-ovn-kubernetes/ovnkube-node-68dth" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.034441 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-host-kubelet\") pod \"ovnkube-node-68dth\" (UID: \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\") " pod="openshift-ovn-kubernetes/ovnkube-node-68dth" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.034463 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-systemd-units\") pod \"ovnkube-node-68dth\" (UID: \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\") " pod="openshift-ovn-kubernetes/ovnkube-node-68dth" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.034487 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-run-ovn\") pod \"ovnkube-node-68dth\" (UID: \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\") " pod="openshift-ovn-kubernetes/ovnkube-node-68dth" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.034773 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-ovnkube-config\") pod \"ovnkube-node-68dth\" (UID: \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\") " pod="openshift-ovn-kubernetes/ovnkube-node-68dth" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.034972 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: 
\"kubernetes.io/configmap/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-env-overrides\") pod \"ovnkube-node-68dth\" (UID: \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\") " pod="openshift-ovn-kubernetes/ovnkube-node-68dth" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.038171 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-ovn-node-metrics-cert\") pod \"ovnkube-node-68dth\" (UID: \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\") " pod="openshift-ovn-kubernetes/ovnkube-node-68dth" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.052260 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qd9rd\" (UniqueName: \"kubernetes.io/projected/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-kube-api-access-qd9rd\") pod \"ovnkube-node-68dth\" (UID: \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\") " pod="openshift-ovn-kubernetes/ovnkube-node-68dth" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.054575 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:01Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.071180 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-9gjj9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-9gjj9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:01Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.086350 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-h5s2m" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"84374038-67ce-4dc0-a2c2-6eed9650c604\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8z7ts\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-h5s2m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": 
failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:01Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.101844 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"faf07f1a-1aa1-4e4a-b93d-739f0a9f1012\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f7b3757e1d1a5295909db644a475e35e9f9826cd7382a5a3eba86b4a76ac04d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f83e92b35264fccdd516d857e5a574a7156f7615b643691b6f8694daa38089b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8841f44f1d4af0e73960ce1c7ac5a4da352f85f6b3637315faa716d853be3277\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\
":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc960423fd7ee0a6231020982f5b932a6a2d7d0515d6f6df503d6c5d51b82096\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:01Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.114872 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c1e1dcf5efd54a3e3546460813ddc68dae027e669a19eeef6af7246b385ed21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:01Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.131794 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:01Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.133376 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.133400 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.133409 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.133424 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.133432 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:01Z","lastTransitionTime":"2025-11-28T11:09:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.150608 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:01Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.163927 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:01Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.167745 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.167792 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.167821 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 11:09:01 crc kubenswrapper[4923]: E1128 11:09:01.168015 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 11:09:01 crc kubenswrapper[4923]: E1128 11:09:01.168124 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 11:09:01 crc kubenswrapper[4923]: E1128 11:09:01.168287 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.171769 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.172758 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.173772 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.174637 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.176249 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.176680 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"092566f7-fc7d-4897-a1f2-4ecedcd3058e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nthb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nthb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-bwdth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:01Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.177017 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.178139 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.178293 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.179113 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.180271 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.180885 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.182019 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.182842 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.183828 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.184512 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.185279 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.186535 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.187373 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.187750 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.188393 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.189072 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.189559 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" 
path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.190125 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.190557 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.191254 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.191687 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.193853 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.194521 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.195414 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.195961 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.200185 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.200637 4923 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.200733 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes" Nov 28 11:09:01 crc kubenswrapper[4923]: W1128 11:09:01.201816 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod08e03349_56fc_4b2d_93d3_cf2405a4b7ad.slice/crio-8407acc2a5d77884103b79f7b2c84a1e5ab69c4c0f6379e4e358d33b72c0c070 WatchSource:0}: Error finding container 8407acc2a5d77884103b79f7b2c84a1e5ab69c4c0f6379e4e358d33b72c0c070: Status 404 returned error can't find the container with id 8407acc2a5d77884103b79f7b2c84a1e5ab69c4c0f6379e4e358d33b72c0c070 Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.203029 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.203715 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.204336 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.206006 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.207222 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.208090 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925f1c65-6136-48ba-85aa-3a3b50560753" path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.208802 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.209542 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.210738 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.211410 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.212555 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node 
kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\
\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\
",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-68dth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:01Z is after 
2025-08-24T17:21:41Z" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.212636 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.213447 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.226984 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.227602 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.228907 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.230407 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.230948 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.232851 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.234367 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.234902 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.235721 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c83fada-ddb5-4acd-99c4-74d9f42e6250\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eece6b2154126c64202c6cb5a8b2953275ed2dc75e76fef6aaf2c4b82a1979f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28093276aebb4751d979649c4ced86f500308d0d4dde397771c0e1e968250ec8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28ae91e6197ea506c337abdbce14a048856e6bda9b35c5de922904c26bc96a54\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc06f87c8ea0744810e2b9cb7ff8bb529fc1b2133ab79d12eb8e6129accd3e18\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc06f87c8ea0744810e2b9cb7ff8bb529fc1b2133ab79d12eb8e6129accd3e18\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28
T11:08:59Z\\\",\\\"message\\\":\\\"-12-28 11:08:43 +0000 UTC (now=2025-11-28 11:08:59.275700323 +0000 UTC))\\\\\\\"\\\\nI1128 11:08:59.275749 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1128 11:08:59.275786 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1128 11:08:59.275797 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI1128 11:08:59.275809 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1128 11:08:59.275835 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1128 11:08:59.275852 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764328134\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764328133\\\\\\\\\\\\\\\" (2025-11-28 10:08:53 +0000 UTC to 2026-11-28 10:08:53 +0000 UTC (now=2025-11-28 11:08:59.275832266 +0000 UTC))\\\\\\\"\\\\nI1128 11:08:59.275869 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1128 11:08:59.275889 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1128 11:08:59.275902 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1128 11:08:59.275909 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1128 11:08:59.275921 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1128 11:08:59.275909 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2723273528/tls.crt::/tmp/serving-cert-2723273528/tls.key\\\\\\\"\\\\nF1128 11:08:59.278169 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c6f085f1fd5a1ed6abe0727d6a94c95fb1b97a9f00a0dc157f62f68698c25ba9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:01Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.237022 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.237353 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.237376 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.237385 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.237398 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.237412 4923 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:01Z","lastTransitionTime":"2025-11-28T11:09:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.237668 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.256026 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-766k2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69fcf39a-3416-4733-a55a-043d5286f8ac\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dnr6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-766k2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:01Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.278364 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c83fada-ddb5-4acd-99c4-74d9f42e6250\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eece6b2154126c64202c6cb5a8b2953275ed2dc75e76fef6aaf2c4b82a1979f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28093276aebb4751d979649c4ced86f500308d0d4dde397771c0e1e968250ec8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28ae91e6197ea506c337abdbce14a048856e6bda9b35c5de922904c26bc96a54\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc06f87c8ea0744810e2b9cb7ff8bb529fc1b2133ab79d12eb8e6129accd3e18\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc06f87c8ea0744810e2b9cb7ff8bb529fc1b2133ab79d12eb8e6129accd3e18\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"-12-28 11:08:43 +0000 UTC (now=2025-11-28 11:08:59.275700323 +0000 UTC))\\\\\\\"\\\\nI1128 11:08:59.275749 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1128 11:08:59.275786 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1128 11:08:59.275797 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI1128 11:08:59.275809 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1128 11:08:59.275835 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1128 11:08:59.275852 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764328134\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764328133\\\\\\\\\\\\\\\" (2025-11-28 10:08:53 +0000 UTC to 2026-11-28 10:08:53 +0000 UTC (now=2025-11-28 11:08:59.275832266 +0000 UTC))\\\\\\\"\\\\nI1128 11:08:59.275869 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1128 11:08:59.275889 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1128 11:08:59.275902 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1128 11:08:59.275909 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1128 11:08:59.275921 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1128 11:08:59.275909 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2723273528/tls.crt::/tmp/serving-cert-2723273528/tls.key\\\\\\\"\\\\nF1128 11:08:59.278169 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c6f085f1fd5a1ed6abe0727d6a94c95fb1b97a9f00a0dc157f62f68698c25ba9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:01Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.287257 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-766k2" event={"ID":"69fcf39a-3416-4733-a55a-043d5286f8ac","Type":"ContainerStarted","Data":"14683c7234bd497157ffe1097cd1eee097e5dd0a9e53a3e39813bc75890961b1"} Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.287299 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-766k2" event={"ID":"69fcf39a-3416-4733-a55a-043d5286f8ac","Type":"ContainerStarted","Data":"41b5dba3d1dee38479cd8e953f9e002b43890a2ef83b2277aed281cfecbad2a8"} Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.287972 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" event={"ID":"08e03349-56fc-4b2d-93d3-cf2405a4b7ad","Type":"ContainerStarted","Data":"8407acc2a5d77884103b79f7b2c84a1e5ab69c4c0f6379e4e358d33b72c0c070"} Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.289319 4923 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.293128 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"fdb7df64556e877b9dd56be5e97103abc8aa8b28a43b4a5389d0f6e2489057cf"} Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.293381 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.294415 4923 generic.go:334] "Generic (PLEG): container finished" podID="b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093" containerID="27143610133e2bc3e2aa453a394a9f65fcdeb97a45221a239dd490029e5a3184" exitCode=0 Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.294472 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-9gjj9" event={"ID":"b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093","Type":"ContainerDied","Data":"27143610133e2bc3e2aa453a394a9f65fcdeb97a45221a239dd490029e5a3184"} Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.294489 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-9gjj9" event={"ID":"b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093","Type":"ContainerStarted","Data":"6ce65eae66f1d0cf11cedc8602a67ab9cf708cd169d642a86dbf78a0f58dd626"} Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.297485 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" event={"ID":"092566f7-fc7d-4897-a1f2-4ecedcd3058e","Type":"ContainerStarted","Data":"5e3ad6f76cbc3a3e771dc55c8711f153c18c1c96798a89e0f20b1ff06041129c"} Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.297526 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" event={"ID":"092566f7-fc7d-4897-a1f2-4ecedcd3058e","Type":"ContainerStarted","Data":"9e0494fbf37786a6c8b1524ab2642c29343c3cfef308a6f0988d59f375d732a9"} Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.297537 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" event={"ID":"092566f7-fc7d-4897-a1f2-4ecedcd3058e","Type":"ContainerStarted","Data":"1e555c7f4ee21f17f51873504a98b00544b33c462745c4745addec9f14442269"} Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.301349 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-h5s2m" event={"ID":"84374038-67ce-4dc0-a2c2-6eed9650c604","Type":"ContainerStarted","Data":"addcc8dd720a66b5089f7fa541a454de2be862cc524d1f8e4c948059ef70e20f"} Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.301389 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-h5s2m" event={"ID":"84374038-67ce-4dc0-a2c2-6eed9650c604","Type":"ContainerStarted","Data":"5a5b6641e1d3ea3e0c167ac6f6127bafc00719a4fe56359b1f067100d5cacc9e"} Nov 28 11:09:01 crc kubenswrapper[4923]: E1128 11:09:01.327492 4923 kubelet.go:1929] "Failed creating a mirror pod for" err="pods \"kube-controller-manager-crc\" already exists" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.349174 4923 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c1e1dcf5efd54a3e3546460813ddc68dae027e669a19eeef6af7246b385ed21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:01Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.349261 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.349289 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.349297 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.349312 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.349321 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:01Z","lastTransitionTime":"2025-11-28T11:09:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.415186 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:01Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.451177 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:01Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.452097 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.452117 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.452125 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.452139 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.452147 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:01Z","lastTransitionTime":"2025-11-28T11:09:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.480606 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:01Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.493542 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"092566f7-fc7d-4897-a1f2-4ecedcd3058e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nthb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nthb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-bwdth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:01Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.512699 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready 
status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts
\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host
-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-68dth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: 
current time 2025-11-28T11:09:01Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.528923 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-766k2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69fcf39a-3416-4733-a55a-043d5286f8ac\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dnr6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-766k2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:01Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.545742 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdcd87eab93f0216a48bbd6038ca2bc510b7b36f895bf66de15084be62a9a0e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa3a1d3e4297edce49cfd44925fbd1cb0d51752581df9a406042cc1da6f87121\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:01Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.554146 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.554182 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.554191 4923 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.554207 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.554216 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:01Z","lastTransitionTime":"2025-11-28T11:09:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.569656 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:01Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.586143 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-9gjj9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-9gjj9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:01Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.602297 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"faf07f1a-1aa1-4e4a-b93d-739f0a9f1012\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f7b3757e1d1a5295909db644a475e35e9f9826cd7382a5a3eba86b4a76ac04d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f83e92b35264fccdd516d857e5a574a7156f7615b643691b6f8694daa38089b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8841f44f1d4af0e73960ce1c7ac5a4da352f85f6b3637315faa716d853be3277\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc960423fd7ee0a6231020982f5b932a6a2d7d0515d6f6df503d6c5d51b82096\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:01Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.618211 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-h5s2m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"84374038-67ce-4dc0-a2c2-6eed9650c604\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8z7ts\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-h5s2m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:01Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.633877 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdcd87eab93f0216a48bbd6038ca2bc510b7b36f895bf66de15084be62a9a0e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa3a1d3e4297edce49cfd44925fbd1cb0d51752581df9a406042cc1da6f87121\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:01Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.655872 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.655910 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.655918 4923 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.656266 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:01Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.656408 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.656420 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:01Z","lastTransitionTime":"2025-11-28T11:09:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.680110 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-9gjj9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27143610133e2bc3e2aa453a394a9f65fcdeb97a45221a239dd490029e5a3184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://27143610133e2bc3e2aa453a394a9f65fcdeb97a45221a239dd490029e5a3184\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets
/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\
\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-9gjj9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:01Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.701360 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-h5s2m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"84374038-67ce-4dc0-a2c2-6eed9650c604\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://addcc8dd720a66b5089f7fa541a454de2be862cc524d1f8e4c948059ef70e20f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"m
ultus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8z7ts\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-h5s2m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:01Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.718367 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"faf07f1a-1aa1-4e4a-b93d-739f0a9f1012\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f7b3757e1d1a5295909db644a475e35e9f9826cd7382a5a3eba86b4a76ac04d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f83e92b35264fccdd516d857e5a574a7156f7615b643691b6f8694daa38089b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8841f44f1d4af0e73960ce1c7ac5a4da352f85f6b3637315faa716d853be3277\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc960423fd7ee0a6231020982f5b932a6a2d7d0515d6f6df503d6c5d51b82096\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:01Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.746008 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:01Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.757960 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.757984 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.757992 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.758005 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.758014 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:01Z","lastTransitionTime":"2025-11-28T11:09:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.774159 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:01Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.798153 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:01Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.810405 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"092566f7-fc7d-4897-a1f2-4ecedcd3058e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5e3ad6f76cbc3a3e771dc55c8711f153c18c1c96798a89e0f20b1ff06041129c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nthb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":
\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e0494fbf37786a6c8b1524ab2642c29343c3cfef308a6f0988d59f375d732a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nthb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-bwdth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:01Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.833034 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-68dth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:01Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.844045 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c83fada-ddb5-4acd-99c4-74d9f42e6250\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eece6b2154126c64202c6cb5a8b2953275ed2dc75e76fef6aaf2c4b82a1979f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28093276aebb4751d979649c4ced86f500308d0d4dde397771c0e1e968250ec8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28ae91e6197ea506c337abdbce14a048856e6bda9b35c5de922904c26bc96a54\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb7df64556e877b9dd56be5e97103abc8aa8b28a43b4a5389d0f6e2489057cf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc06f87c8ea0744810e2b9cb7ff8bb529fc1b2133ab79d12eb8e6129accd3e18\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"-12-28 11:08:43 +0000 UTC (now=2025-11-28 11:08:59.275700323 +0000 UTC))\\\\\\\"\\\\nI1128 11:08:59.275749 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1128 11:08:59.275786 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1128 11:08:59.275797 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI1128 11:08:59.275809 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1128 11:08:59.275835 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1128 11:08:59.275852 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764328134\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764328133\\\\\\\\\\\\\\\" (2025-11-28 10:08:53 +0000 UTC to 2026-11-28 10:08:53 +0000 UTC (now=2025-11-28 11:08:59.275832266 +0000 UTC))\\\\\\\"\\\\nI1128 11:08:59.275869 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1128 11:08:59.275889 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1128 11:08:59.275902 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1128 11:08:59.275909 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1128 11:08:59.275921 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1128 11:08:59.275909 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2723273528/tls.crt::/tmp/serving-cert-2723273528/tls.key\\\\\\\"\\\\nF1128 11:08:59.278169 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:43Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c6f085f1fd5a1ed6abe0727d6a94c95fb1b97a9f00a0dc157f62f68698c25ba9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:01Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.856202 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c1e1dcf5efd54a3e3546460813ddc68dae027e669a19eeef6af7246b385ed21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:01Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.859445 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.859479 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.859488 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.859501 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.859512 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:01Z","lastTransitionTime":"2025-11-28T11:09:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.872380 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-766k2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69fcf39a-3416-4733-a55a-043d5286f8ac\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14683c7234bd497157ffe1097cd1eee097e5dd0a9e53a3e39813bc75890961b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dnr6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-766k2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:01Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.961116 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.961154 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.961163 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.961176 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:01 crc kubenswrapper[4923]: I1128 11:09:01.961195 4923 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:01Z","lastTransitionTime":"2025-11-28T11:09:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:02 crc kubenswrapper[4923]: I1128 11:09:02.063323 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:02 crc kubenswrapper[4923]: I1128 11:09:02.063600 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:02 crc kubenswrapper[4923]: I1128 11:09:02.063610 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:02 crc kubenswrapper[4923]: I1128 11:09:02.063623 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:02 crc kubenswrapper[4923]: I1128 11:09:02.063633 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:02Z","lastTransitionTime":"2025-11-28T11:09:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:02 crc kubenswrapper[4923]: I1128 11:09:02.165444 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:02 crc kubenswrapper[4923]: I1128 11:09:02.165481 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:02 crc kubenswrapper[4923]: I1128 11:09:02.165490 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:02 crc kubenswrapper[4923]: I1128 11:09:02.165504 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:02 crc kubenswrapper[4923]: I1128 11:09:02.165513 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:02Z","lastTransitionTime":"2025-11-28T11:09:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:02 crc kubenswrapper[4923]: I1128 11:09:02.268248 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:02 crc kubenswrapper[4923]: I1128 11:09:02.268281 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:02 crc kubenswrapper[4923]: I1128 11:09:02.268291 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:02 crc kubenswrapper[4923]: I1128 11:09:02.268305 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:02 crc kubenswrapper[4923]: I1128 11:09:02.268314 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:02Z","lastTransitionTime":"2025-11-28T11:09:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:02 crc kubenswrapper[4923]: I1128 11:09:02.306568 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"0d288688a32f135820030d0816b0e9567100a4732e99c41c8b7f05374c8251f6"} Nov 28 11:09:02 crc kubenswrapper[4923]: I1128 11:09:02.308244 4923 generic.go:334] "Generic (PLEG): container finished" podID="08e03349-56fc-4b2d-93d3-cf2405a4b7ad" containerID="18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f" exitCode=0 Nov 28 11:09:02 crc kubenswrapper[4923]: I1128 11:09:02.308313 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" event={"ID":"08e03349-56fc-4b2d-93d3-cf2405a4b7ad","Type":"ContainerDied","Data":"18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f"} Nov 28 11:09:02 crc kubenswrapper[4923]: I1128 11:09:02.310415 4923 generic.go:334] "Generic (PLEG): container finished" podID="b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093" containerID="79f89c182f50622044f3978965cb214c601f6de4cddc96eaa118f532b2864276" exitCode=0 Nov 28 11:09:02 crc kubenswrapper[4923]: I1128 11:09:02.310445 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-9gjj9" event={"ID":"b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093","Type":"ContainerDied","Data":"79f89c182f50622044f3978965cb214c601f6de4cddc96eaa118f532b2864276"} Nov 28 11:09:02 crc kubenswrapper[4923]: I1128 11:09:02.322789 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdcd87eab93f0216a48bbd6038ca2bc510b7b36f895bf66de15084be62a9a0e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa3a1d3e4297edce49cfd44925fbd1cb0d51752581df9a406042cc1da6f87121\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:02Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:02 crc kubenswrapper[4923]: I1128 11:09:02.340768 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0d288688a32f135820030d0816b0e9567100a4732e99c41c8b7f05374c8251f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:02Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:02 crc kubenswrapper[4923]: I1128 11:09:02.366548 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-9gjj9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27143610133e2bc3e2aa453a394a9f65fcdeb97a45221a239dd490029e5a3184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://27143610133e2bc3e2aa453a394a9f65fcdeb97a45221a239dd490029e5a3184\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reaso
n\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-9gjj9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:02Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:02 crc 
kubenswrapper[4923]: I1128 11:09:02.373415 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:02 crc kubenswrapper[4923]: I1128 11:09:02.373455 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:02 crc kubenswrapper[4923]: I1128 11:09:02.373471 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:02 crc kubenswrapper[4923]: I1128 11:09:02.373485 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:02 crc kubenswrapper[4923]: I1128 11:09:02.373493 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:02Z","lastTransitionTime":"2025-11-28T11:09:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:02 crc kubenswrapper[4923]: I1128 11:09:02.379893 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"faf07f1a-1aa1-4e4a-b93d-739f0a9f1012\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f7b3757e1d1a5295909db644a475e35e9f9826cd7382a5a3eba86b4a76ac04d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f83e92b35264fccdd516d857e5a574a7156f7615b643691b6f8694daa38089b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\
\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8841f44f1d4af0e73960ce1c7ac5a4da352f85f6b3637315faa716d853be3277\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc960423fd7ee0a6231020982f5b932a6a2d7d0515d6f6df503d6c5d51b82096\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:02Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:02 crc kubenswrapper[4923]: I1128 11:09:02.392037 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-h5s2m" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"84374038-67ce-4dc0-a2c2-6eed9650c604\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://addcc8dd720a66b5089f7fa541a454de2be862cc524d1f8e4c948059ef70e20f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8z7ts\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-h5s2m\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:02Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:02 crc kubenswrapper[4923]: I1128 11:09:02.407563 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c83fada-ddb5-4acd-99c4-74d9f42e6250\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eece6b2154126c64202c6cb5a8b2953275ed2dc75e76fef6aaf2c4b82a1979f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28093276aebb4751d979649c4ced86f500308d0d4dde397771c0e1e968250ec8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28ae91e6197ea506c337abdbce14a048856e6bda9b35c5de922904c26bc96a54\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.i
o/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb7df64556e877b9dd56be5e97103abc8aa8b28a43b4a5389d0f6e2489057cf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc06f87c8ea0744810e2b9cb7ff8bb529fc1b2133ab79d12eb8e6129accd3e18\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"-12-28 11:08:43 +0000 UTC (now=2025-11-28 11:08:59.275700323 +0000 UTC))\\\\\\\"\\\\nI1128 11:08:59.275749 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1128 11:08:59.275786 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1128 11:08:59.275797 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI1128 11:08:59.275809 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1128 11:08:59.275835 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1128 11:08:59.275852 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764328134\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764328133\\\\\\\\\\\\\\\" (2025-11-28 10:08:53 +0000 UTC to 2026-11-28 10:08:53 +0000 UTC (now=2025-11-28 11:08:59.275832266 +0000 UTC))\\\\\\\"\\\\nI1128 11:08:59.275869 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1128 11:08:59.275889 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1128 11:08:59.275902 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1128 11:08:59.275909 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1128 11:08:59.275921 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1128 11:08:59.275909 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2723273528/tls.crt::/tmp/serving-cert-2723273528/tls.key\\\\\\\"\\\\nF1128 11:08:59.278169 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:43Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c6f085f1fd5a1ed6abe0727d6a94c95fb1b97a9f00a0dc157f62f68698c25ba9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:02Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:02 crc kubenswrapper[4923]: I1128 11:09:02.422253 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c1e1dcf5efd54a3e3546460813ddc68dae027e669a19eeef6af7246b385ed21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:02Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:02 crc kubenswrapper[4923]: I1128 11:09:02.437466 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:02Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:02 crc kubenswrapper[4923]: I1128 11:09:02.451913 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:02Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:02 crc kubenswrapper[4923]: I1128 11:09:02.476731 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:02 crc kubenswrapper[4923]: I1128 11:09:02.476767 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:02 crc kubenswrapper[4923]: I1128 11:09:02.476775 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:02 crc kubenswrapper[4923]: I1128 11:09:02.476788 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:02 crc kubenswrapper[4923]: I1128 11:09:02.476798 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:02Z","lastTransitionTime":"2025-11-28T11:09:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:02 crc kubenswrapper[4923]: I1128 11:09:02.476801 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:02Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:02 crc kubenswrapper[4923]: I1128 11:09:02.487643 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"092566f7-fc7d-4897-a1f2-4ecedcd3058e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5e3ad6f76cbc3a3e771dc55c8711f153c18c1c96798a89e0f20b1ff06041129c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nthb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e0494fbf37786a6c8b1524ab2642c29343c3cfef308a6f0988d59f375d732a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nthb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-bwdth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:02Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:02 crc kubenswrapper[4923]: I1128 11:09:02.505349 4923 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\
\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"re
cursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482
919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-68dth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:02Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:02 crc kubenswrapper[4923]: I1128 11:09:02.515859 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-766k2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69fcf39a-3416-4733-a55a-043d5286f8ac\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14683c7234bd497157ffe1097cd1eee097e5dd0a9e53a3e39813bc75890961b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dnr6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-766k2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:02Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:02 crc kubenswrapper[4923]: I1128 11:09:02.531919 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-766k2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69fcf39a-3416-4733-a55a-043d5286f8ac\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14683c7234bd497157ffe1097cd1eee097e5dd0a9e53a3e39813bc75890961b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dnr6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-766k2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:02Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:02 crc kubenswrapper[4923]: I1128 11:09:02.543016 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdcd87eab93f0216a48bbd6038ca2bc510b7b36f895bf66de15084be62a9a0e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa3a1d3e4297edce49cfd44925fbd1cb0d51752581df9a406042cc1da6f87121\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:02Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:02 crc kubenswrapper[4923]: I1128 11:09:02.552983 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0d288688a32f135820030d0816b0e9567100a4732e99c41c8b7f05374c8251f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:02Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:02 crc kubenswrapper[4923]: I1128 11:09:02.569358 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-9gjj9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27143610133e2bc3e2aa453a394a9f65fcdeb97a45221a239dd490029e5a3184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://27143610133e2bc3e2aa453a394a9f65fcdeb97a45221a239dd490029e5a3184\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79f89c182f50622044f3978965cb214c601f6de4cddc96eaa118f532b2864276\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://79f89c182f50622044f3978965cb214c601f6de4cddc96eaa118f532b2864276\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-
28T11:09:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-9gjj9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:02Z is after 2025-08-24T17:21:41Z"
Nov 28 11:09:02 crc kubenswrapper[4923]: I1128 11:09:02.579062 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 11:09:02 crc kubenswrapper[4923]: I1128 11:09:02.579082 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 11:09:02 crc kubenswrapper[4923]: I1128 11:09:02.579091 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 11:09:02 crc kubenswrapper[4923]: I1128 11:09:02.579105 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 11:09:02 crc kubenswrapper[4923]: I1128 11:09:02.579115 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:02Z","lastTransitionTime":"2025-11-28T11:09:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 11:09:02 crc kubenswrapper[4923]: I1128 11:09:02.583800 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-h5s2m" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"84374038-67ce-4dc0-a2c2-6eed9650c604\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://addcc8dd720a66b5089f7fa541a454de2be862cc524d1f8e4c948059ef70e20f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8z7ts\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-h5s2m\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:02Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:02 crc kubenswrapper[4923]: I1128 11:09:02.595182 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"faf07f1a-1aa1-4e4a-b93d-739f0a9f1012\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f7b3757e1d1a5295909db644a475e35e9f9826cd7382a5a3eba86b4a76ac04d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f83e92b35264fccdd516d857e5a574a7156f7615b643691b6f8694daa38089b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8841f44f1d4af0e73960ce1c7ac5a4da352f85f6b3637315faa716d853be3277\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"st
arted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc960423fd7ee0a6231020982f5b932a6a2d7d0515d6f6df503d6c5d51b82096\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:02Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:02 crc kubenswrapper[4923]: I1128 11:09:02.608874 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c1e1dcf5efd54a3e3546460813ddc68dae027e669a19eeef6af7246b385ed21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:02Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:02 crc kubenswrapper[4923]: I1128 11:09:02.621705 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:02Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:02 crc kubenswrapper[4923]: I1128 11:09:02.632325 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:02Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:02 crc kubenswrapper[4923]: I1128 11:09:02.644145 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:02Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:02 crc kubenswrapper[4923]: I1128 11:09:02.653684 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"092566f7-fc7d-4897-a1f2-4ecedcd3058e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5e3ad6f76cbc3a3e771dc55c8711f153c18c1c96798a89e0f20b1ff06041129c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nthb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e0494fbf37786a6c8b1524ab2642c29343c3cfef308a6f0988d59f375d732a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\
\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nthb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-bwdth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:02Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:02 crc kubenswrapper[4923]: I1128 11:09:02.674230 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-68dth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:02Z 
is after 2025-08-24T17:21:41Z"
Nov 28 11:09:02 crc kubenswrapper[4923]: I1128 11:09:02.680892 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 11:09:02 crc kubenswrapper[4923]: I1128 11:09:02.680922 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 11:09:02 crc kubenswrapper[4923]: I1128 11:09:02.680943 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 11:09:02 crc kubenswrapper[4923]: I1128 11:09:02.680956 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 11:09:02 crc kubenswrapper[4923]: I1128 11:09:02.680965 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:02Z","lastTransitionTime":"2025-11-28T11:09:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 11:09:02 crc kubenswrapper[4923]: I1128 11:09:02.688983 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c83fada-ddb5-4acd-99c4-74d9f42e6250\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eece6b2154126c64202c6cb5a8b2953275ed2dc75e76fef6aaf2c4b82a1979f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28093276aebb4751d979649c4ced86f500308d0d4dde397771c0e1e968250ec8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28ae91e6197ea506c337abdbce14a048856e6bda9b35c5de922904c26bc96a54\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb7df64556e877b9dd56be5e97103abc8aa8b28a43b4a5389d0f6e2489057cf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc06f87c8ea0744810e2b9cb7ff8bb529fc1b2133ab79d12eb8e6129accd3e18\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"-12-28 11:08:43 +0000 UTC (now=2025-11-28 11:08:59.275700323 +0000 UTC))\\\\\\\"\\\\nI1128 
11:08:59.275749 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1128 11:08:59.275786 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1128 11:08:59.275797 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI1128 11:08:59.275809 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1128 11:08:59.275835 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1128 11:08:59.275852 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764328134\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764328133\\\\\\\\\\\\\\\" (2025-11-28 10:08:53 +0000 UTC to 2026-11-28 10:08:53 +0000 UTC (now=2025-11-28 11:08:59.275832266 +0000 UTC))\\\\\\\"\\\\nI1128 11:08:59.275869 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1128 11:08:59.275889 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1128 11:08:59.275902 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1128 11:08:59.275909 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1128 11:08:59.275921 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1128 11:08:59.275909 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2723273528/tls.crt::/tmp/serving-cert-2723273528/tls.key\\\\\\\"\\\\nF1128 11:08:59.278169 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:43Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c6f085f1fd5a1ed6abe0727d6a94c95fb1b97a9f00a0dc157f62f68698c25ba9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:02Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:02 crc kubenswrapper[4923]: I1128 11:09:02.782948 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:02 crc kubenswrapper[4923]: I1128 11:09:02.782978 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:02 crc kubenswrapper[4923]: I1128 11:09:02.782986 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:02 crc kubenswrapper[4923]: I1128 11:09:02.783005 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:02 crc kubenswrapper[4923]: I1128 11:09:02.783014 4923 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:02Z","lastTransitionTime":"2025-11-28T11:09:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:02 crc kubenswrapper[4923]: I1128 11:09:02.851227 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 11:09:02 crc kubenswrapper[4923]: I1128 11:09:02.851358 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 11:09:02 crc kubenswrapper[4923]: I1128 11:09:02.851406 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 11:09:02 crc kubenswrapper[4923]: E1128 11:09:02.851510 4923 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 11:09:02 crc kubenswrapper[4923]: E1128 11:09:02.851561 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 11:09:06.85154624 +0000 UTC m=+25.980230450 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 11:09:02 crc kubenswrapper[4923]: E1128 11:09:02.851876 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 11:09:06.851867459 +0000 UTC m=+25.980551669 (durationBeforeRetry 4s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 11:09:02 crc kubenswrapper[4923]: E1128 11:09:02.851966 4923 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 11:09:02 crc kubenswrapper[4923]: E1128 11:09:02.851994 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 11:09:06.851986033 +0000 UTC m=+25.980670243 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 11:09:02 crc kubenswrapper[4923]: I1128 11:09:02.885300 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:02 crc kubenswrapper[4923]: I1128 11:09:02.885348 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:02 crc kubenswrapper[4923]: I1128 11:09:02.885358 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:02 crc kubenswrapper[4923]: I1128 11:09:02.885376 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:02 crc kubenswrapper[4923]: I1128 11:09:02.885385 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:02Z","lastTransitionTime":"2025-11-28T11:09:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:02 crc kubenswrapper[4923]: I1128 11:09:02.952086 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 11:09:02 crc kubenswrapper[4923]: I1128 11:09:02.952153 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 11:09:02 crc kubenswrapper[4923]: E1128 11:09:02.952291 4923 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 11:09:02 crc kubenswrapper[4923]: E1128 11:09:02.952331 4923 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 11:09:02 crc kubenswrapper[4923]: E1128 11:09:02.952343 4923 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 11:09:02 crc kubenswrapper[4923]: E1128 11:09:02.952364 4923 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 11:09:02 crc kubenswrapper[4923]: E1128 11:09:02.952392 4923 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 11:09:02 crc kubenswrapper[4923]: E1128 11:09:02.952411 4923 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 11:09:02 crc kubenswrapper[4923]: E1128 11:09:02.952418 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-28 11:09:06.952400692 +0000 UTC m=+26.081084902 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 11:09:02 crc kubenswrapper[4923]: E1128 11:09:02.952478 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-28 11:09:06.952456813 +0000 UTC m=+26.081141053 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 11:09:02 crc kubenswrapper[4923]: I1128 11:09:02.987348 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:02 crc kubenswrapper[4923]: I1128 11:09:02.987401 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:02 crc kubenswrapper[4923]: I1128 11:09:02.987418 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:02 crc kubenswrapper[4923]: I1128 11:09:02.987442 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:02 crc kubenswrapper[4923]: I1128 11:09:02.987459 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:02Z","lastTransitionTime":"2025-11-28T11:09:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.090047 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.090113 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.090131 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.090156 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.090176 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:03Z","lastTransitionTime":"2025-11-28T11:09:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.168444 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.168464 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 11:09:03 crc kubenswrapper[4923]: E1128 11:09:03.169178 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 11:09:03 crc kubenswrapper[4923]: E1128 11:09:03.169318 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.169211 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 11:09:03 crc kubenswrapper[4923]: E1128 11:09:03.169470 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.193144 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.193216 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.193240 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.193271 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.193298 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:03Z","lastTransitionTime":"2025-11-28T11:09:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.278009 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/node-ca-9qvkm"] Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.278543 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-9qvkm" Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.284896 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.285200 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.285557 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.285754 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.295374 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.295407 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.295419 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.295436 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.295461 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:03Z","lastTransitionTime":"2025-11-28T11:09:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.308870 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:03Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.318876 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" event={"ID":"08e03349-56fc-4b2d-93d3-cf2405a4b7ad","Type":"ContainerStarted","Data":"88bb4ac52c4706ca3d80080efb31eff071b89651d1a474b4c0c11ed5559ee7a4"} Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.318918 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" event={"ID":"08e03349-56fc-4b2d-93d3-cf2405a4b7ad","Type":"ContainerStarted","Data":"b7b206747c810fe48a3d4269cdf80dce693f2d075510aabb42ef2c6dbbea97e7"} Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.318946 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" event={"ID":"08e03349-56fc-4b2d-93d3-cf2405a4b7ad","Type":"ContainerStarted","Data":"b3c01dc5b138b3d245898dd4a01c5e81350afe6fabfe9e0333589cd9439d4017"} Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.318957 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" 
event={"ID":"08e03349-56fc-4b2d-93d3-cf2405a4b7ad","Type":"ContainerStarted","Data":"9ee3c047cb59b98c8394618e6194fc477b983a7039581951378c69698b307ee7"} Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.318967 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" event={"ID":"08e03349-56fc-4b2d-93d3-cf2405a4b7ad","Type":"ContainerStarted","Data":"a7489bfb225a27d96b70124820fb1924580c08b3355ef948335f881d7646a8a3"} Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.318974 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" event={"ID":"08e03349-56fc-4b2d-93d3-cf2405a4b7ad","Type":"ContainerStarted","Data":"4bc7c6e0b076f04ba7810c82578147a9a3af59d3393e8effb111c299583aa6de"} Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.324710 4923 generic.go:334] "Generic (PLEG): container finished" podID="b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093" containerID="b7996a8b1d06ca35a2ee6c89edc2eaa7e45a6084ab54ff0caaa091c763d3cd47" exitCode=0 Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.325445 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-9gjj9" event={"ID":"b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093","Type":"ContainerDied","Data":"b7996a8b1d06ca35a2ee6c89edc2eaa7e45a6084ab54ff0caaa091c763d3cd47"} Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.330319 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"092566f7-fc7d-4897-a1f2-4ecedcd3058e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5e3ad6f76cbc3a3e771dc55c8711f153c18c1c96798a89e0f20b1ff06041129c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nthb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://
9e0494fbf37786a6c8b1524ab2642c29343c3cfef308a6f0988d59f375d732a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nthb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-bwdth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:03Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.348701 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-68dth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:03Z 
is after 2025-08-24T17:21:41Z" Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.356479 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/cf32d1c9-4639-48a9-b972-c9ad6daec543-host\") pod \"node-ca-9qvkm\" (UID: \"cf32d1c9-4639-48a9-b972-c9ad6daec543\") " pod="openshift-image-registry/node-ca-9qvkm" Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.356625 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fnwc6\" (UniqueName: \"kubernetes.io/projected/cf32d1c9-4639-48a9-b972-c9ad6daec543-kube-api-access-fnwc6\") pod \"node-ca-9qvkm\" (UID: \"cf32d1c9-4639-48a9-b972-c9ad6daec543\") " pod="openshift-image-registry/node-ca-9qvkm" Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.356732 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/cf32d1c9-4639-48a9-b972-c9ad6daec543-serviceca\") pod \"node-ca-9qvkm\" (UID: \"cf32d1c9-4639-48a9-b972-c9ad6daec543\") " pod="openshift-image-registry/node-ca-9qvkm" Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.364093 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c83fada-ddb5-4acd-99c4-74d9f42e6250\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eece6b2154126c64202c6cb5a8b2953275ed2dc75e76fef6aaf2c4b82a1979f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28093276aebb4751d979649c4ced86f500308d0d4dde397771c0e1e968250ec8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28ae91e6197ea506c337abdbce14a048856e6bda9b35c5de922904c26bc96a54\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb7df64556e877b9dd56be5e97103abc8aa8b28a43b4a5389d0f6e2489057cf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc06f87c8ea0744810e2b9cb7ff8bb529fc1b2133ab79d12eb8e6129accd3e18\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"-12-28 11:08:43 +0000 UTC (now=2025-11-28 11:08:59.275700323 +0000 UTC))\\\\\\\"\\\\nI1128 
11:08:59.275749 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1128 11:08:59.275786 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1128 11:08:59.275797 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI1128 11:08:59.275809 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1128 11:08:59.275835 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1128 11:08:59.275852 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764328134\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764328133\\\\\\\\\\\\\\\" (2025-11-28 10:08:53 +0000 UTC to 2026-11-28 10:08:53 +0000 UTC (now=2025-11-28 11:08:59.275832266 +0000 UTC))\\\\\\\"\\\\nI1128 11:08:59.275869 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1128 11:08:59.275889 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1128 11:08:59.275902 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1128 11:08:59.275909 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1128 11:08:59.275921 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1128 11:08:59.275909 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2723273528/tls.crt::/tmp/serving-cert-2723273528/tls.key\\\\\\\"\\\\nF1128 11:08:59.278169 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:43Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c6f085f1fd5a1ed6abe0727d6a94c95fb1b97a9f00a0dc157f62f68698c25ba9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:03Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.377924 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c1e1dcf5efd54a3e3546460813ddc68dae027e669a19eeef6af7246b385ed21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:03Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.391766 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:03Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.398848 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.398890 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.398901 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.398919 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.398951 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:03Z","lastTransitionTime":"2025-11-28T11:09:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.406589 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:03Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.417977 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-766k2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"69fcf39a-3416-4733-a55a-043d5286f8ac\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14683c7234bd497157ffe1097cd1eee097e5dd0a9e53a3e39813bc75890961b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dnr6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-766k2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:03Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.437826 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-9gjj9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27143610133e2bc3e2aa453a394a9f65fcdeb97a45221a239dd490029e5a3184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://27143610133e2bc3e2aa453a394a9f65fcdeb97a45221a239dd490029e5a3184\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79f89c182f50622044f3978965cb214c601f6de4cddc96eaa118f532b2864276\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://79f89c182f50622044f3978965cb214c601f6de4cddc96eaa118f532b2864276\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":
{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-9gjj9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:03Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.448909 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9qvkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf32d1c9-4639-48a9-b972-c9ad6daec543\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:03Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:03Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnwc6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:03Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9qvkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-28T11:09:03Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.460364 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/cf32d1c9-4639-48a9-b972-c9ad6daec543-serviceca\") pod \"node-ca-9qvkm\" (UID: \"cf32d1c9-4639-48a9-b972-c9ad6daec543\") " pod="openshift-image-registry/node-ca-9qvkm" Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.460430 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/cf32d1c9-4639-48a9-b972-c9ad6daec543-host\") pod \"node-ca-9qvkm\" (UID: \"cf32d1c9-4639-48a9-b972-c9ad6daec543\") " pod="openshift-image-registry/node-ca-9qvkm" Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.460451 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fnwc6\" (UniqueName: \"kubernetes.io/projected/cf32d1c9-4639-48a9-b972-c9ad6daec543-kube-api-access-fnwc6\") pod \"node-ca-9qvkm\" (UID: \"cf32d1c9-4639-48a9-b972-c9ad6daec543\") " pod="openshift-image-registry/node-ca-9qvkm" Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.460776 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/cf32d1c9-4639-48a9-b972-c9ad6daec543-host\") pod \"node-ca-9qvkm\" (UID: \"cf32d1c9-4639-48a9-b972-c9ad6daec543\") " pod="openshift-image-registry/node-ca-9qvkm" Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.461799 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/cf32d1c9-4639-48a9-b972-c9ad6daec543-serviceca\") pod \"node-ca-9qvkm\" (UID: \"cf32d1c9-4639-48a9-b972-c9ad6daec543\") " pod="openshift-image-registry/node-ca-9qvkm" Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.464866 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdcd87eab93f0216a48bbd6038ca2bc510b7b36f895bf66de15084be62a9a0e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa3a1d3e4297edce49cfd44925fbd1cb0d51752581df9a406042cc1da6f87121\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:03Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.482613 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0d288688a32f135820030d0816b0e9567100a4732e99c41c8b7f05374c8251f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:03Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.487780 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fnwc6\" (UniqueName: \"kubernetes.io/projected/cf32d1c9-4639-48a9-b972-c9ad6daec543-kube-api-access-fnwc6\") pod \"node-ca-9qvkm\" (UID: \"cf32d1c9-4639-48a9-b972-c9ad6daec543\") " pod="openshift-image-registry/node-ca-9qvkm" Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.511369 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.511410 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.511419 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.511433 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.511443 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:03Z","lastTransitionTime":"2025-11-28T11:09:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns 
error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.517375 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"faf07f1a-1aa1-4e4a-b93d-739f0a9f1012\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f7b3757e1d1a5295909db644a475e35e9f9826cd7382a5a3eba86b4a76ac04d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f83e92b35264fccdd516d857e5a574a7156f7615b643691b6f8694daa38089b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8841f44f1d4af0e73960ce1c7ac5a4da352f85f6b3637315faa716d853be3277\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod
-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc960423fd7ee0a6231020982f5b932a6a2d7d0515d6f6df503d6c5d51b82096\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:03Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.532316 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-h5s2m" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"84374038-67ce-4dc0-a2c2-6eed9650c604\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://addcc8dd720a66b5089f7fa541a454de2be862cc524d1f8e4c948059ef70e20f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8z7ts\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-h5s2m\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:03Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.549271 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9qvkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf32d1c9-4639-48a9-b972-c9ad6daec543\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:03Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:03Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnwc6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:03Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9qvkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:03Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.561060 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdcd87eab93f0216a48bbd6038ca2bc510b7b36f895bf66de15084be62a9a0e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa3a1d3e4297edce49cfd44925fbd1cb0d51752581df9a406042cc1da6f87121\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:03Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.570568 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0d288688a32f135820030d0816b0e9567100a4732e99c41c8b7f05374c8251f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:03Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.582573 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-9gjj9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27143610133e2bc3e2aa453a394a9f65fcdeb97a45221a239dd490029e5a3184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://27143610133e2bc3e2aa453a394a9f65fcdeb97a45221a239dd490029e5a3184\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79f89c182f50622044f3978965cb214c601f6de4cddc96eaa118f532b2864276\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://79f89c182f50622044f3978965cb214c601f6de4cddc96eaa118f532b2864276\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7996a8b1d06ca35a2ee6c89edc2eaa7e45a6084ab54ff0caaa091c763d3cd47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b7996a8b1d06ca35a2ee6c89edc2eaa7e45a6084ab54ff0caaa091c763d3cd47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/
cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-9gjj9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:03Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.595617 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"faf07f1a-1aa1-4e4a-b93d-739f0a9f1012\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f7b3757e1d1a5295909db644a475e35e9f9826cd7382a5a3eba86b4a76ac04d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f83e92b35264fccdd516d857e5a574a7156f7615b643691b6f8694daa38089b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-
dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8841f44f1d4af0e73960ce1c7ac5a4da352f85f6b3637315faa716d853be3277\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc960423fd7ee0a6231020982f5b932a6a2d7d0515d6f6df503d6c5d51b82096\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:03Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.604082 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/node-ca-9qvkm" Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.613969 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.613999 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.614013 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.614030 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.614041 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:03Z","lastTransitionTime":"2025-11-28T11:09:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:03 crc kubenswrapper[4923]: W1128 11:09:03.614815 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcf32d1c9_4639_48a9_b972_c9ad6daec543.slice/crio-d4a477f523da1ca82ce654dd4b28ecd27f51ced58e4104093fe1b11ee7b2299b WatchSource:0}: Error finding container d4a477f523da1ca82ce654dd4b28ecd27f51ced58e4104093fe1b11ee7b2299b: Status 404 returned error can't find the container with id d4a477f523da1ca82ce654dd4b28ecd27f51ced58e4104093fe1b11ee7b2299b Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.621085 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-h5s2m" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"84374038-67ce-4dc0-a2c2-6eed9650c604\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://addcc8dd720a66b5089f7fa541a454de2be862cc524d1f8e4c948059ef70e20f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8z7ts\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-h5s2m\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:03Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.633216 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"092566f7-fc7d-4897-a1f2-4ecedcd3058e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5e3ad6f76cbc3a3e771dc55c8711f153c18c1c96798a89e0f20b1ff06041129c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nthb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e0494fbf37786a6c8b1524ab2642c29343c3cfef308a6f0988d59f375d732a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nthb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-bwdth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:03Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.652604 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-68dth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:03Z 
is after 2025-08-24T17:21:41Z" Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.671959 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c83fada-ddb5-4acd-99c4-74d9f42e6250\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eece6b2154126c64202c6cb5a8b2953275ed2dc75e76fef6aaf2c4b82a1979f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28093276aebb4751d979649c4ced86f500308d0d4dde397771c0e1e968250ec8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28ae91e6197ea506c337abdbce14a048856e6bda9b35c5de922904c26bc96a54\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\
\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb7df64556e877b9dd56be5e97103abc8aa8b28a43b4a5389d0f6e2489057cf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc06f87c8ea0744810e2b9cb7ff8bb529fc1b2133ab79d12eb8e6129accd3e18\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"-12-28 11:08:43 +0000 UTC (now=2025-11-28 11:08:59.275700323 +0000 UTC))\\\\\\\"\\\\nI1128 11:08:59.275749 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1128 11:08:59.275786 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1128 11:08:59.275797 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI1128 11:08:59.275809 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1128 11:08:59.275835 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1128 11:08:59.275852 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764328134\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764328133\\\\\\\\\\\\\\\" (2025-11-28 10:08:53 +0000 UTC to 2026-11-28 10:08:53 +0000 UTC (now=2025-11-28 11:08:59.275832266 +0000 UTC))\\\\\\\"\\\\nI1128 11:08:59.275869 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1128 11:08:59.275889 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1128 11:08:59.275902 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1128 11:08:59.275909 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1128 11:08:59.275921 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1128 11:08:59.275909 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2723273528/tls.crt::/tmp/serving-cert-2723273528/tls.key\\\\\\\"\\\\nF1128 11:08:59.278169 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:43Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c6f085f1fd5a1ed6abe0727d6a94c95fb1b97a9f00a0dc157f62f68698c25ba9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:03Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.708496 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c1e1dcf5efd54a3e3546460813ddc68dae027e669a19eeef6af7246b385ed21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:03Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.716458 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.716485 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.716496 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.716513 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.716524 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:03Z","lastTransitionTime":"2025-11-28T11:09:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.752011 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:03Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.792289 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:03Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.818453 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.818492 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.818503 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.818522 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.818547 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:03Z","lastTransitionTime":"2025-11-28T11:09:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.829280 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:03Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.866530 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-766k2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"69fcf39a-3416-4733-a55a-043d5286f8ac\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14683c7234bd497157ffe1097cd1eee097e5dd0a9e53a3e39813bc75890961b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dnr6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-766k2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:03Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.921746 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.921777 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.921786 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.921801 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:03 crc kubenswrapper[4923]: I1128 11:09:03.921810 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:03Z","lastTransitionTime":"2025-11-28T11:09:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:04 crc kubenswrapper[4923]: I1128 11:09:04.024681 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:04 crc kubenswrapper[4923]: I1128 11:09:04.024720 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:04 crc kubenswrapper[4923]: I1128 11:09:04.024733 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:04 crc kubenswrapper[4923]: I1128 11:09:04.024749 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:04 crc kubenswrapper[4923]: I1128 11:09:04.024762 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:04Z","lastTransitionTime":"2025-11-28T11:09:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:04 crc kubenswrapper[4923]: I1128 11:09:04.127863 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:04 crc kubenswrapper[4923]: I1128 11:09:04.127901 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:04 crc kubenswrapper[4923]: I1128 11:09:04.127909 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:04 crc kubenswrapper[4923]: I1128 11:09:04.127924 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:04 crc kubenswrapper[4923]: I1128 11:09:04.127945 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:04Z","lastTransitionTime":"2025-11-28T11:09:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:04 crc kubenswrapper[4923]: I1128 11:09:04.231161 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:04 crc kubenswrapper[4923]: I1128 11:09:04.231207 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:04 crc kubenswrapper[4923]: I1128 11:09:04.231218 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:04 crc kubenswrapper[4923]: I1128 11:09:04.231236 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:04 crc kubenswrapper[4923]: I1128 11:09:04.231250 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:04Z","lastTransitionTime":"2025-11-28T11:09:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:04 crc kubenswrapper[4923]: I1128 11:09:04.332291 4923 generic.go:334] "Generic (PLEG): container finished" podID="b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093" containerID="62d8385e1aa47815f9084d28d70dae899c80019ce59f5725455c594a31c97f22" exitCode=0 Nov 28 11:09:04 crc kubenswrapper[4923]: I1128 11:09:04.332393 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-9gjj9" event={"ID":"b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093","Type":"ContainerDied","Data":"62d8385e1aa47815f9084d28d70dae899c80019ce59f5725455c594a31c97f22"} Nov 28 11:09:04 crc kubenswrapper[4923]: I1128 11:09:04.332951 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:04 crc kubenswrapper[4923]: I1128 11:09:04.332973 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:04 crc kubenswrapper[4923]: I1128 11:09:04.332983 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:04 crc kubenswrapper[4923]: I1128 11:09:04.332996 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:04 crc kubenswrapper[4923]: I1128 11:09:04.333007 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:04Z","lastTransitionTime":"2025-11-28T11:09:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:04 crc kubenswrapper[4923]: I1128 11:09:04.335729 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-9qvkm" event={"ID":"cf32d1c9-4639-48a9-b972-c9ad6daec543","Type":"ContainerStarted","Data":"ee259c68571ed9e58d29ab09558dea3cdcc89ebfb898d6f27e896cb0d80665bf"} Nov 28 11:09:04 crc kubenswrapper[4923]: I1128 11:09:04.335763 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-9qvkm" event={"ID":"cf32d1c9-4639-48a9-b972-c9ad6daec543","Type":"ContainerStarted","Data":"d4a477f523da1ca82ce654dd4b28ecd27f51ced58e4104093fe1b11ee7b2299b"} Nov 28 11:09:04 crc kubenswrapper[4923]: I1128 11:09:04.358383 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-766k2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69fcf39a-3416-4733-a55a-043d5286f8ac\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14683c7234bd497157ffe1097cd1eee097e5dd0a9e53a3e39813bc75890961b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dnr6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-766k2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:04Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:04 crc kubenswrapper[4923]: I1128 11:09:04.373538 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdcd87eab93f0216a48bbd6038ca2bc510b7b36f895bf66de15084be62a9a0e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa3a1d3e4297edce49cfd44925fbd1cb0d51752581df9a406042cc1da6f87121\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:04Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:04 crc kubenswrapper[4923]: I1128 11:09:04.388310 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0d288688a32f135820030d0816b0e9567100a4732e99c41c8b7f05374c8251f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:04Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:04 crc kubenswrapper[4923]: I1128 11:09:04.405185 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-9gjj9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27143610133e2bc3e2aa453a394a9f65fcdeb97a45221a239dd490029e5a3184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://27143610133e2bc3e2aa453a394a9f65fcdeb97a45221a239dd490029e5a3184\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79f89c182f50622044f3978965cb214c601f6de4cddc96eaa118f532b2864276\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://79f89c182f50622044f3978965cb214c601f6de4cddc96eaa118f532b2864276\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7996a8b1d06ca35a2ee6c89edc2eaa7e45a6084ab54ff0caaa091c763d3cd47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b7996a8b1d06ca35a2ee6c89edc2eaa7e45a6084ab54ff0caaa091c763d3cd47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62d8385e1aa47815f9084d28d70dae899c80019ce59f5725455c594a31c97f22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62d8385e1aa47815f9084d28d70dae899c80019ce59f5725455c594a31c97f22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disa
bled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-9gjj9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:04Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:04 crc kubenswrapper[4923]: I1128 11:09:04.420579 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9qvkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf32d1c9-4639-48a9-b972-c9ad6daec543\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:03Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:03Z\\\",\\\"message\\\":\\\"containers with unready status: 
[node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnwc6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:03Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9qvkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:04Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:04 crc kubenswrapper[4923]: I1128 11:09:04.435427 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:04 crc kubenswrapper[4923]: I1128 11:09:04.435459 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:04 crc kubenswrapper[4923]: I1128 11:09:04.435468 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:04 crc kubenswrapper[4923]: I1128 11:09:04.435485 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:04 crc kubenswrapper[4923]: I1128 11:09:04.435494 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:04Z","lastTransitionTime":"2025-11-28T11:09:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:04 crc kubenswrapper[4923]: I1128 11:09:04.437879 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"faf07f1a-1aa1-4e4a-b93d-739f0a9f1012\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f7b3757e1d1a5295909db644a475e35e9f9826cd7382a5a3eba86b4a76ac04d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f83e92b35264fccdd516d857e5a574a7156f7615b643691b6f8694daa38089b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8841f44f1d4af0e73960ce1c7ac5a4da352f85f6b3637315faa716d853be3277\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc960423fd7ee0a6231020982f5b932a6a2d7d0515d6f6df503d6c5d51b82096\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:04Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:04 crc kubenswrapper[4923]: I1128 11:09:04.457363 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-h5s2m" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"84374038-67ce-4dc0-a2c2-6eed9650c604\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://addcc8dd720a66b5089f7fa541a454de2be862cc524d1f8e4c948059ef70e20f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8z7ts\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-h5s2m\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:04Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:04 crc kubenswrapper[4923]: I1128 11:09:04.472166 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c83fada-ddb5-4acd-99c4-74d9f42e6250\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eece6b2154126c64202c6cb5a8b2953275ed2dc75e76fef6aaf2c4b82a1979f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28093276aebb4751d979649c4ced86f500308d0d4dde397771c0e1e968250ec8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28ae91e6197ea506c337abdbce14a048856e6bda9b35c5de922904c26bc96a54\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.i
o/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb7df64556e877b9dd56be5e97103abc8aa8b28a43b4a5389d0f6e2489057cf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc06f87c8ea0744810e2b9cb7ff8bb529fc1b2133ab79d12eb8e6129accd3e18\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"-12-28 11:08:43 +0000 UTC (now=2025-11-28 11:08:59.275700323 +0000 UTC))\\\\\\\"\\\\nI1128 11:08:59.275749 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1128 11:08:59.275786 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1128 11:08:59.275797 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI1128 11:08:59.275809 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1128 11:08:59.275835 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1128 11:08:59.275852 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764328134\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764328133\\\\\\\\\\\\\\\" (2025-11-28 10:08:53 +0000 UTC to 2026-11-28 10:08:53 +0000 UTC (now=2025-11-28 11:08:59.275832266 +0000 UTC))\\\\\\\"\\\\nI1128 11:08:59.275869 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1128 11:08:59.275889 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1128 11:08:59.275902 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1128 11:08:59.275909 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1128 11:08:59.275921 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1128 11:08:59.275909 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2723273528/tls.crt::/tmp/serving-cert-2723273528/tls.key\\\\\\\"\\\\nF1128 11:08:59.278169 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:43Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c6f085f1fd5a1ed6abe0727d6a94c95fb1b97a9f00a0dc157f62f68698c25ba9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:04Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:04 crc kubenswrapper[4923]: I1128 11:09:04.483507 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c1e1dcf5efd54a3e3546460813ddc68dae027e669a19eeef6af7246b385ed21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:04Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:04 crc kubenswrapper[4923]: I1128 11:09:04.502108 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:04Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:04 crc kubenswrapper[4923]: I1128 11:09:04.516492 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:04Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:04 crc kubenswrapper[4923]: I1128 11:09:04.533958 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:04Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:04 crc kubenswrapper[4923]: I1128 11:09:04.537510 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:04 crc kubenswrapper[4923]: I1128 11:09:04.537570 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:04 crc kubenswrapper[4923]: I1128 11:09:04.537657 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:04 crc kubenswrapper[4923]: I1128 11:09:04.537689 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:04 crc kubenswrapper[4923]: I1128 11:09:04.537780 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:04Z","lastTransitionTime":"2025-11-28T11:09:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:04 crc kubenswrapper[4923]: I1128 11:09:04.549297 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"092566f7-fc7d-4897-a1f2-4ecedcd3058e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5e3ad6f76cbc3a3e771dc55c8711f153c18c1c96798a89e0f20b1ff06041129c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nthb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e0494fbf37786a6c8b1524ab2642c29343c3cfef308a6f0988d59f375d732a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nthb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-bwdth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:04Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:04 crc kubenswrapper[4923]: I1128 11:09:04.577659 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af
0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"n
ame\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIP
s\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-68dth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:04Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:04 crc kubenswrapper[4923]: I1128 11:09:04.606583 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"faf07f1a-1aa1-4e4a-b93d-739f0a9f1012\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f7b3757e1d1a5295909db644a475e35e9f9826cd7382a5a3eba86b4a76ac04d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f83e92b35264fccdd516d857e5a574a7156f7615b643691b6f8694daa38089b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8841f44f1d4af0e73960ce1c7ac5a4da352f85f6b3637315faa716d853be3277\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc960423fd7ee0a6231020982f5b932a6a2d7d0515d6f6df503d6c5d51b82096\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:04Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:04 crc kubenswrapper[4923]: I1128 11:09:04.623294 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-h5s2m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"84374038-67ce-4dc0-a2c2-6eed9650c604\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://addcc8dd720a66b5089f7fa541a454de2be862cc524d1f8e4c948059ef70e20f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run
/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8z7ts\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-h5s2m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:04Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:04 crc kubenswrapper[4923]: I1128 11:09:04.640511 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:04 crc kubenswrapper[4923]: I1128 11:09:04.640563 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:04 crc kubenswrapper[4923]: I1128 11:09:04.640580 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:04 crc kubenswrapper[4923]: I1128 11:09:04.640604 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:04 crc kubenswrapper[4923]: I1128 11:09:04.640620 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:04Z","lastTransitionTime":"2025-11-28T11:09:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:04 crc kubenswrapper[4923]: I1128 11:09:04.643064 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":fa
lse,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\
\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0
b0667fabc3e55f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-68dth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:04Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:04 crc kubenswrapper[4923]: I1128 11:09:04.666796 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c83fada-ddb5-4acd-99c4-74d9f42e6250\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eece6b2154126c64202c6cb5a8b2953275ed2dc75e76fef6aaf2c4b82a1979f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28093276aebb4751d979649c4ced86f500308d0d4dde397771c0e1e968250ec8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28ae91e6197ea506c337abdbce14a048856e6bda9b35c5de922904c26bc96a54\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb7df64556e877b9dd56be5e97103abc8aa8b28a43b4a5389d0f6e2489057cf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc06f87c8ea0744810e2b9cb7ff8bb529fc1b2133ab79d12eb8e6129accd3e18\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"-12-28 11:08:43 +0000 UTC (now=2025-11-28 11:08:59.275700323 +0000 UTC))\\\\\\\"\\\\nI1128 
11:08:59.275749 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1128 11:08:59.275786 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1128 11:08:59.275797 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI1128 11:08:59.275809 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1128 11:08:59.275835 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1128 11:08:59.275852 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764328134\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764328133\\\\\\\\\\\\\\\" (2025-11-28 10:08:53 +0000 UTC to 2026-11-28 10:08:53 +0000 UTC (now=2025-11-28 11:08:59.275832266 +0000 UTC))\\\\\\\"\\\\nI1128 11:08:59.275869 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1128 11:08:59.275889 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1128 11:08:59.275902 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1128 11:08:59.275909 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1128 11:08:59.275921 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1128 11:08:59.275909 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2723273528/tls.crt::/tmp/serving-cert-2723273528/tls.key\\\\\\\"\\\\nF1128 11:08:59.278169 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:43Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c6f085f1fd5a1ed6abe0727d6a94c95fb1b97a9f00a0dc157f62f68698c25ba9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:04Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:04 crc kubenswrapper[4923]: I1128 11:09:04.682128 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c1e1dcf5efd54a3e3546460813ddc68dae027e669a19eeef6af7246b385ed21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:04Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:04 crc kubenswrapper[4923]: I1128 11:09:04.695461 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:04Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:04 crc kubenswrapper[4923]: I1128 11:09:04.717559 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:04Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:04 crc kubenswrapper[4923]: I1128 11:09:04.742378 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:04 crc kubenswrapper[4923]: I1128 11:09:04.742413 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:04 crc kubenswrapper[4923]: I1128 11:09:04.742421 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:04 crc kubenswrapper[4923]: I1128 11:09:04.742436 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:04 crc kubenswrapper[4923]: I1128 11:09:04.742447 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:04Z","lastTransitionTime":"2025-11-28T11:09:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:04 crc kubenswrapper[4923]: I1128 11:09:04.750963 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:04Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:04 crc kubenswrapper[4923]: I1128 11:09:04.792254 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"092566f7-fc7d-4897-a1f2-4ecedcd3058e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5e3ad6f76cbc3a3e771dc55c8711f153c18c1c96798a89e0f20b1ff06041129c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nthb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e0494fbf37786a6c8b1524ab2642c29343c3cfef308a6f0988d59f375d732a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nthb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-bwdth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:04Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:04 crc kubenswrapper[4923]: I1128 11:09:04.829651 4923 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-dns/node-resolver-766k2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69fcf39a-3416-4733-a55a-043d5286f8ac\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14683c7234bd497157ffe1097cd1eee097e5dd0a9e53a3e39813bc75890961b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dnr6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-766k2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:04Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:04 crc kubenswrapper[4923]: I1128 11:09:04.844545 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:04 crc kubenswrapper[4923]: I1128 11:09:04.844579 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:04 crc kubenswrapper[4923]: I1128 11:09:04.844595 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:04 crc kubenswrapper[4923]: I1128 11:09:04.844610 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:04 crc kubenswrapper[4923]: I1128 11:09:04.844618 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:04Z","lastTransitionTime":"2025-11-28T11:09:04Z","reason":"KubeletNotReady","message":"container runtime network not 
ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:04 crc kubenswrapper[4923]: I1128 11:09:04.876234 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdcd87eab93f0216a48bbd6038ca2bc510b7b36f895bf66de15084be62a9a0e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa3a1d3e4297edce49cfd44925fbd1cb0d51752581df9a406042cc1da6f87121\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:04Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:04 crc kubenswrapper[4923]: I1128 11:09:04.914256 4923 
status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0d288688a32f135820030d0816b0e9567100a4732e99c41c8b7f05374c8251f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:04Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:04 crc kubenswrapper[4923]: I1128 11:09:04.947108 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:04 crc kubenswrapper[4923]: I1128 11:09:04.947135 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:04 crc kubenswrapper[4923]: I1128 11:09:04.947144 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:04 crc kubenswrapper[4923]: I1128 11:09:04.947159 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:04 crc kubenswrapper[4923]: I1128 11:09:04.947169 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:04Z","lastTransitionTime":"2025-11-28T11:09:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:04 crc kubenswrapper[4923]: I1128 11:09:04.955080 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-9gjj9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27143610133e2bc3e2aa453a394a9f65fcdeb97a45221a239dd490029e5a3184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://27143610133e2bc3e2aa453a394a9f65fcdeb97a45221a239dd490029e5a3184\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79f89c182f50622044f3978965cb214c601f6de4cddc96eaa118f532b2864276\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://79f89c182f50622044f3978965cb214c601f6de4cddc96eaa118f532b2864276\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7996a8b1d06ca35a2ee6c89edc2eaa7e45a6084ab54ff0caaa091c763d3cd47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b7996a8b1d06ca35a2ee6c89edc2eaa7e45a6084ab54ff0caaa091c763d3cd47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62d8385e1aa47815f9084d28d70dae899c80019ce59f5725455c594a31c97f22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62d8385e1aa47815f9084d28d70dae899c80019ce59f5725455c594a31c97f22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:
09:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-9gjj9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:04Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:04 crc kubenswrapper[4923]: I1128 11:09:04.990552 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9qvkm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf32d1c9-4639-48a9-b972-c9ad6daec543\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee259c68571ed9e58d29ab09558dea3cdcc89ebfb898d6f27e896cb0d80665bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnwc6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:03Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9qvkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:04Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:05 crc kubenswrapper[4923]: I1128 11:09:05.049477 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:05 crc kubenswrapper[4923]: I1128 11:09:05.049510 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:05 crc kubenswrapper[4923]: I1128 11:09:05.049518 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:05 crc kubenswrapper[4923]: I1128 11:09:05.049531 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:05 crc kubenswrapper[4923]: I1128 11:09:05.049540 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:05Z","lastTransitionTime":"2025-11-28T11:09:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:05 crc kubenswrapper[4923]: I1128 11:09:05.151514 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:05 crc kubenswrapper[4923]: I1128 11:09:05.151583 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:05 crc kubenswrapper[4923]: I1128 11:09:05.151596 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:05 crc kubenswrapper[4923]: I1128 11:09:05.151615 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:05 crc kubenswrapper[4923]: I1128 11:09:05.151629 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:05Z","lastTransitionTime":"2025-11-28T11:09:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:05 crc kubenswrapper[4923]: I1128 11:09:05.168056 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 11:09:05 crc kubenswrapper[4923]: I1128 11:09:05.168126 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 11:09:05 crc kubenswrapper[4923]: E1128 11:09:05.168166 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 11:09:05 crc kubenswrapper[4923]: I1128 11:09:05.168185 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 11:09:05 crc kubenswrapper[4923]: E1128 11:09:05.168261 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 11:09:05 crc kubenswrapper[4923]: E1128 11:09:05.168468 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 11:09:05 crc kubenswrapper[4923]: I1128 11:09:05.253685 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:05 crc kubenswrapper[4923]: I1128 11:09:05.253725 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:05 crc kubenswrapper[4923]: I1128 11:09:05.253736 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:05 crc kubenswrapper[4923]: I1128 11:09:05.253755 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:05 crc kubenswrapper[4923]: I1128 11:09:05.253766 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:05Z","lastTransitionTime":"2025-11-28T11:09:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:05 crc kubenswrapper[4923]: I1128 11:09:05.342009 4923 generic.go:334] "Generic (PLEG): container finished" podID="b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093" containerID="2f6b2e1bc9f8f538d0973d9b1726d2c105d61fcd559df3ab8a2ec77b2d8f44a3" exitCode=0 Nov 28 11:09:05 crc kubenswrapper[4923]: I1128 11:09:05.342057 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-9gjj9" event={"ID":"b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093","Type":"ContainerDied","Data":"2f6b2e1bc9f8f538d0973d9b1726d2c105d61fcd559df3ab8a2ec77b2d8f44a3"} Nov 28 11:09:05 crc kubenswrapper[4923]: I1128 11:09:05.350435 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" event={"ID":"08e03349-56fc-4b2d-93d3-cf2405a4b7ad","Type":"ContainerStarted","Data":"b7c2e3f2c83ec1b586a9478fb8d23caccab36a0fe08a3f0907a7b0cb2e67af65"} Nov 28 11:09:05 crc kubenswrapper[4923]: I1128 11:09:05.356099 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:05 crc kubenswrapper[4923]: I1128 11:09:05.356127 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:05 crc kubenswrapper[4923]: I1128 11:09:05.356136 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:05 crc kubenswrapper[4923]: I1128 11:09:05.356150 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:05 crc kubenswrapper[4923]: I1128 11:09:05.356160 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:05Z","lastTransitionTime":"2025-11-28T11:09:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:05 crc kubenswrapper[4923]: I1128 11:09:05.358858 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"faf07f1a-1aa1-4e4a-b93d-739f0a9f1012\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f7b3757e1d1a5295909db644a475e35e9f9826cd7382a5a3eba86b4a76ac04d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f83e92b35264fccdd516d857e5a574a7156f7615b643691b6f8694daa38089b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8841f44f1d4af0e73960ce1c7ac5a4da352f85f6b3637315faa716d853be3277\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc960423fd7ee0a6231020982f5b932a6a2d7d0515d6f6df503d6c5d51b82096\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:05Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:05 crc kubenswrapper[4923]: I1128 11:09:05.373300 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-h5s2m" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"84374038-67ce-4dc0-a2c2-6eed9650c604\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://addcc8dd720a66b5089f7fa541a454de2be862cc524d1f8e4c948059ef70e20f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8z7ts\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-h5s2m\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:05Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:05 crc kubenswrapper[4923]: I1128 11:09:05.393013 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:05Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:05 crc kubenswrapper[4923]: I1128 11:09:05.410024 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"092566f7-fc7d-4897-a1f2-4ecedcd3058e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5e3ad6f76cbc3a3e771dc55c8711f153c18c1c96798a89e0f20b1ff06041129c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nthb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e0494fbf37786a6c8b1524ab2642c29343c3cfef308a6f0988d59f375d732a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nthb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-bwdth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:05Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:05 crc kubenswrapper[4923]: I1128 11:09:05.436270 4923 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",
\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47e
f0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17
b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-68dth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:05Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:05 crc kubenswrapper[4923]: I1128 11:09:05.454209 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c83fada-ddb5-4acd-99c4-74d9f42e6250\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eece6b2154126c64202c6cb5a8b2953275ed2dc75e76fef6aaf2c4b82a1979f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28093276aebb4751d979649c4ced86f500308d0d4dde397771c0e1e968250ec8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28ae91e6197ea506c337abdbce14a048856e6bda9b35c5de922904c26bc96a54\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb7df64556e877b9dd56be5e97103abc8aa8b28a43b4a5389d0f6e2489057cf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc06f87c8ea0744810e2b9cb7ff8bb529fc1b2133ab79d12eb8e6129accd3e18\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"-12-28 11:08:43 +0000 UTC (now=2025-11-28 11:08:59.275700323 +0000 UTC))\\\\\\\"\\\\nI1128 
11:08:59.275749 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1128 11:08:59.275786 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1128 11:08:59.275797 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI1128 11:08:59.275809 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1128 11:08:59.275835 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1128 11:08:59.275852 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764328134\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764328133\\\\\\\\\\\\\\\" (2025-11-28 10:08:53 +0000 UTC to 2026-11-28 10:08:53 +0000 UTC (now=2025-11-28 11:08:59.275832266 +0000 UTC))\\\\\\\"\\\\nI1128 11:08:59.275869 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1128 11:08:59.275889 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1128 11:08:59.275902 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1128 11:08:59.275909 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1128 11:08:59.275921 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1128 11:08:59.275909 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2723273528/tls.crt::/tmp/serving-cert-2723273528/tls.key\\\\\\\"\\\\nF1128 11:08:59.278169 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:43Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c6f085f1fd5a1ed6abe0727d6a94c95fb1b97a9f00a0dc157f62f68698c25ba9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:05Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:05 crc kubenswrapper[4923]: I1128 11:09:05.458499 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:05 crc kubenswrapper[4923]: I1128 11:09:05.458562 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:05 crc kubenswrapper[4923]: I1128 11:09:05.458580 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:05 crc kubenswrapper[4923]: I1128 11:09:05.458604 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:05 crc kubenswrapper[4923]: I1128 11:09:05.458621 4923 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:05Z","lastTransitionTime":"2025-11-28T11:09:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:05 crc kubenswrapper[4923]: I1128 11:09:05.468241 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c1e1dcf5efd54a3e3546460813ddc68dae027e669a19eeef6af7246b385ed21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:05Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:05 crc kubenswrapper[4923]: I1128 11:09:05.484252 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:05Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:05 crc kubenswrapper[4923]: I1128 11:09:05.496807 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:05Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:05 crc kubenswrapper[4923]: I1128 11:09:05.513543 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-766k2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69fcf39a-3416-4733-a55a-043d5286f8ac\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14683c7234bd497157ffe1097cd1eee097e5dd0a9e53a3e39813bc75890961b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dnr6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-766k2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:05Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:05 crc kubenswrapper[4923]: I1128 11:09:05.534630 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-9gjj9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27143610133e2bc3e2aa453a394a9f65fcdeb97a45221a239dd490029e5a3184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://27143610133e2bc3e2aa453a394a9f65fcdeb97a45221a239dd490029e5a3184\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":
true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79f89c182f50622044f3978965cb214c601f6de4cddc96eaa118f532b2864276\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://79f89c182f50622044f3978965cb214c601f6de4cddc96eaa118f532b2864276\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7996a8b1d06ca35a2ee6c89edc2eaa7e45a6084ab54ff0caaa091c763d3cd47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b7996a8b1d06ca35a2ee6c89edc2eaa7e45a6084ab54ff0caaa091c763d3cd47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62d8385e1aa47815f9084d28d70dae899c80019ce59f5725455c594a31c97f22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\
"cri-o://62d8385e1aa47815f9084d28d70dae899c80019ce59f5725455c594a31c97f22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f6b2e1bc9f8f538d0973d9b1726d2c105d61fcd559df3ab8a2ec77b2d8f44a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f6b2e1bc9f8f538d0973d9b1726d2c105d61fcd559df3ab8a2ec77b2d8f44a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-9gjj9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:05Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:05 crc kubenswrapper[4923]: I1128 11:09:05.546522 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9qvkm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf32d1c9-4639-48a9-b972-c9ad6daec543\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee259c68571ed9e58d29ab09558dea3cdcc89ebfb898d6f27e896cb0d80665bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnwc6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:03Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9qvkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:05Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:05 crc kubenswrapper[4923]: I1128 11:09:05.565345 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdcd87eab93f0216a48bbd6038ca2bc510b7b36f895bf66de15084be62a9a0e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa3a1d3e4297edce49cfd44925fbd1cb0d51752581df9a406042cc1da6f87121\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:05Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:05 crc kubenswrapper[4923]: I1128 11:09:05.566920 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:05 crc kubenswrapper[4923]: I1128 11:09:05.566952 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:05 crc kubenswrapper[4923]: I1128 11:09:05.566963 4923 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 28 11:09:05 crc kubenswrapper[4923]: I1128 11:09:05.566975 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:05 crc kubenswrapper[4923]: I1128 11:09:05.566984 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:05Z","lastTransitionTime":"2025-11-28T11:09:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:05 crc kubenswrapper[4923]: I1128 11:09:05.583442 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0d288688a32f135820030d0816b0e9567100a4732e99c41c8b7f05374c8251f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:05Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:05 crc kubenswrapper[4923]: I1128 11:09:05.669186 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:05 crc kubenswrapper[4923]: I1128 11:09:05.669213 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:05 crc kubenswrapper[4923]: I1128 11:09:05.669221 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:05 crc 
kubenswrapper[4923]: I1128 11:09:05.669235 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:05 crc kubenswrapper[4923]: I1128 11:09:05.669245 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:05Z","lastTransitionTime":"2025-11-28T11:09:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:05 crc kubenswrapper[4923]: I1128 11:09:05.771605 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:05 crc kubenswrapper[4923]: I1128 11:09:05.771630 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:05 crc kubenswrapper[4923]: I1128 11:09:05.771638 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:05 crc kubenswrapper[4923]: I1128 11:09:05.771649 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:05 crc kubenswrapper[4923]: I1128 11:09:05.771657 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:05Z","lastTransitionTime":"2025-11-28T11:09:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:05 crc kubenswrapper[4923]: I1128 11:09:05.875003 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:05 crc kubenswrapper[4923]: I1128 11:09:05.875048 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:05 crc kubenswrapper[4923]: I1128 11:09:05.875066 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:05 crc kubenswrapper[4923]: I1128 11:09:05.875088 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:05 crc kubenswrapper[4923]: I1128 11:09:05.875105 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:05Z","lastTransitionTime":"2025-11-28T11:09:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:05 crc kubenswrapper[4923]: I1128 11:09:05.978062 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:05 crc kubenswrapper[4923]: I1128 11:09:05.978516 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:05 crc kubenswrapper[4923]: I1128 11:09:05.978532 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:05 crc kubenswrapper[4923]: I1128 11:09:05.978550 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:05 crc kubenswrapper[4923]: I1128 11:09:05.978562 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:05Z","lastTransitionTime":"2025-11-28T11:09:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:06 crc kubenswrapper[4923]: I1128 11:09:06.080448 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:06 crc kubenswrapper[4923]: I1128 11:09:06.080475 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:06 crc kubenswrapper[4923]: I1128 11:09:06.080483 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:06 crc kubenswrapper[4923]: I1128 11:09:06.080506 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:06 crc kubenswrapper[4923]: I1128 11:09:06.080514 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:06Z","lastTransitionTime":"2025-11-28T11:09:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:06 crc kubenswrapper[4923]: I1128 11:09:06.182788 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:06 crc kubenswrapper[4923]: I1128 11:09:06.182854 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:06 crc kubenswrapper[4923]: I1128 11:09:06.182872 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:06 crc kubenswrapper[4923]: I1128 11:09:06.182897 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:06 crc kubenswrapper[4923]: I1128 11:09:06.182918 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:06Z","lastTransitionTime":"2025-11-28T11:09:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:06 crc kubenswrapper[4923]: I1128 11:09:06.287212 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:06 crc kubenswrapper[4923]: I1128 11:09:06.287282 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:06 crc kubenswrapper[4923]: I1128 11:09:06.287299 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:06 crc kubenswrapper[4923]: I1128 11:09:06.287325 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:06 crc kubenswrapper[4923]: I1128 11:09:06.287342 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:06Z","lastTransitionTime":"2025-11-28T11:09:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:06 crc kubenswrapper[4923]: I1128 11:09:06.359215 4923 generic.go:334] "Generic (PLEG): container finished" podID="b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093" containerID="9a368daf98912d176b66d5aba37e5e91937fbee8c7bd7ce6658993668c8e1525" exitCode=0 Nov 28 11:09:06 crc kubenswrapper[4923]: I1128 11:09:06.359265 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-9gjj9" event={"ID":"b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093","Type":"ContainerDied","Data":"9a368daf98912d176b66d5aba37e5e91937fbee8c7bd7ce6658993668c8e1525"} Nov 28 11:09:06 crc kubenswrapper[4923]: I1128 11:09:06.377852 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-766k2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"69fcf39a-3416-4733-a55a-043d5286f8ac\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14683c7234bd497157ffe1097cd1eee097e5dd0a9e53a3e39813bc75890961b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dnr6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-766k2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:06Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:06 crc kubenswrapper[4923]: I1128 11:09:06.394190 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:06 crc kubenswrapper[4923]: I1128 11:09:06.394242 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:06 crc kubenswrapper[4923]: I1128 11:09:06.394258 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:06 crc kubenswrapper[4923]: I1128 11:09:06.394286 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:06 crc kubenswrapper[4923]: I1128 11:09:06.394303 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:06Z","lastTransitionTime":"2025-11-28T11:09:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:06 crc kubenswrapper[4923]: I1128 11:09:06.407717 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-9gjj9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27143610133e2bc3e2aa453a394a9f65fcdeb97a45221a239dd490029e5a3184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://27143610133e2bc3e2aa453a394a9f65fcdeb97a45221a239dd490029e5a3184\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\
\"}]},{\\\"containerID\\\":\\\"cri-o://79f89c182f50622044f3978965cb214c601f6de4cddc96eaa118f532b2864276\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://79f89c182f50622044f3978965cb214c601f6de4cddc96eaa118f532b2864276\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7996a8b1d06ca35a2ee6c89edc2eaa7e45a6084ab54ff0caaa091c763d3cd47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b7996a8b1d06ca35a2ee6c89edc2eaa7e45a6084ab54ff0caaa091c763d3cd47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62d8385e1aa47815f9084d28d70dae899c80019ce59f5725455c594a31c97f22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62d8385e1aa47815f9084d28d70dae899c80019ce59f5725455c594a31c97f22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:03Z\\\"}},
\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f6b2e1bc9f8f538d0973d9b1726d2c105d61fcd559df3ab8a2ec77b2d8f44a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f6b2e1bc9f8f538d0973d9b1726d2c105d61fcd559df3ab8a2ec77b2d8f44a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a368daf98912d176b66d5aba37e5e91937fbee8c7bd7ce6658993668c8e1525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9a368daf98912d176b66d5aba37e5e91937fbee8c7bd7ce6658993668c8e1525\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-9gjj9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:06Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:06 crc kubenswrapper[4923]: I1128 11:09:06.425416 
4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9qvkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf32d1c9-4639-48a9-b972-c9ad6daec543\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee259c68571ed9e58d29ab09558dea3cdcc89ebfb898d6f27e896cb0d80665bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnwc6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:03Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9qvkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:06Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:06 crc kubenswrapper[4923]: I1128 11:09:06.445579 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdcd87eab93f0216a48bbd6038ca2bc510b7b36f895bf66de15084be62a9a0e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa3a1d3e4297edce49cfd44925fbd1cb0d51752581df9a406042cc1da6f87121\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:06Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:06 crc kubenswrapper[4923]: I1128 11:09:06.457175 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0d288688a32f135820030d0816b0e9567100a4732e99c41c8b7f05374c8251f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:06Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:06 crc kubenswrapper[4923]: I1128 11:09:06.473877 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"faf07f1a-1aa1-4e4a-b93d-739f0a9f1012\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f7b3757e1d1a5295909db644a475e35e9f9826cd7382a5a3eba86b4a76ac04d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f83e92b35264fccdd516d857e5a574a7156f7615b643691b6f8694daa38089b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8841f44f1d4af0e73960ce1c7ac5a4da352f85f6b3637315faa716d853be3277\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc960423fd7ee0a6231020982f5b932a6a2d7d0515d6f6df503d6c5d51b82096\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:06Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:06 crc kubenswrapper[4923]: I1128 11:09:06.488644 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-h5s2m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"84374038-67ce-4dc0-a2c2-6eed9650c604\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://addcc8dd720a66b5089f7fa541a454de2be862cc524d1f8e4c948059ef70e20f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run
/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8z7ts\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-h5s2m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:06Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:06 crc kubenswrapper[4923]: I1128 11:09:06.501750 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:06 crc kubenswrapper[4923]: I1128 11:09:06.501801 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:06 crc kubenswrapper[4923]: I1128 11:09:06.501819 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:06 crc kubenswrapper[4923]: I1128 11:09:06.501845 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:06 crc kubenswrapper[4923]: I1128 11:09:06.501863 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:06Z","lastTransitionTime":"2025-11-28T11:09:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:06 crc kubenswrapper[4923]: I1128 11:09:06.513371 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:06Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:06 crc kubenswrapper[4923]: I1128 11:09:06.532897 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"092566f7-fc7d-4897-a1f2-4ecedcd3058e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5e3ad6f76cbc3a3e771dc55c8711f153c18c1c96798a89e0f20b1ff06041129c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nthb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e0494fbf37786a6c8b1524ab2642c29343c3cfef308a6f0988d59f375d732a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nthb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-bwdth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:06Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:06 crc kubenswrapper[4923]: I1128 11:09:06.554167 4923 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",
\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47e
f0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17
b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-68dth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:06Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:06 crc kubenswrapper[4923]: I1128 11:09:06.567852 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c83fada-ddb5-4acd-99c4-74d9f42e6250\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eece6b2154126c64202c6cb5a8b2953275ed2dc75e76fef6aaf2c4b82a1979f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28093276aebb4751d979649c4ced86f500308d0d4dde397771c0e1e968250ec8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28ae91e6197ea506c337abdbce14a048856e6bda9b35c5de922904c26bc96a54\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb7df64556e877b9dd56be5e97103abc8aa8b28a43b4a5389d0f6e2489057cf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc06f87c8ea0744810e2b9cb7ff8bb529fc1b2133ab79d12eb8e6129accd3e18\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"-12-28 11:08:43 +0000 UTC (now=2025-11-28 11:08:59.275700323 +0000 UTC))\\\\\\\"\\\\nI1128 
11:08:59.275749 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1128 11:08:59.275786 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1128 11:08:59.275797 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI1128 11:08:59.275809 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1128 11:08:59.275835 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1128 11:08:59.275852 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764328134\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764328133\\\\\\\\\\\\\\\" (2025-11-28 10:08:53 +0000 UTC to 2026-11-28 10:08:53 +0000 UTC (now=2025-11-28 11:08:59.275832266 +0000 UTC))\\\\\\\"\\\\nI1128 11:08:59.275869 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1128 11:08:59.275889 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1128 11:08:59.275902 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1128 11:08:59.275909 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1128 11:08:59.275921 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1128 11:08:59.275909 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2723273528/tls.crt::/tmp/serving-cert-2723273528/tls.key\\\\\\\"\\\\nF1128 11:08:59.278169 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:43Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c6f085f1fd5a1ed6abe0727d6a94c95fb1b97a9f00a0dc157f62f68698c25ba9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:06Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:06 crc kubenswrapper[4923]: I1128 11:09:06.585902 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c1e1dcf5efd54a3e3546460813ddc68dae027e669a19eeef6af7246b385ed21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:06Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:06 crc kubenswrapper[4923]: I1128 11:09:06.601986 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:06Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:06 crc kubenswrapper[4923]: I1128 11:09:06.605393 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:06 crc kubenswrapper[4923]: I1128 11:09:06.605441 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:06 crc kubenswrapper[4923]: I1128 11:09:06.605462 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:06 crc kubenswrapper[4923]: I1128 11:09:06.605485 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:06 crc kubenswrapper[4923]: I1128 11:09:06.605502 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:06Z","lastTransitionTime":"2025-11-28T11:09:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:06 crc kubenswrapper[4923]: I1128 11:09:06.621188 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:06Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:06 crc kubenswrapper[4923]: I1128 11:09:06.709052 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:06 crc kubenswrapper[4923]: I1128 11:09:06.709102 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:06 crc kubenswrapper[4923]: I1128 11:09:06.709118 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:06 crc kubenswrapper[4923]: I1128 11:09:06.709144 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:06 crc kubenswrapper[4923]: I1128 11:09:06.709160 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:06Z","lastTransitionTime":"2025-11-28T11:09:06Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:06 crc kubenswrapper[4923]: I1128 11:09:06.812311 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:06 crc kubenswrapper[4923]: I1128 11:09:06.812371 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:06 crc kubenswrapper[4923]: I1128 11:09:06.812393 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:06 crc kubenswrapper[4923]: I1128 11:09:06.812423 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:06 crc kubenswrapper[4923]: I1128 11:09:06.812443 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:06Z","lastTransitionTime":"2025-11-28T11:09:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:06 crc kubenswrapper[4923]: I1128 11:09:06.892028 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 11:09:06 crc kubenswrapper[4923]: E1128 11:09:06.892165 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 11:09:14.892140158 +0000 UTC m=+34.020824388 (durationBeforeRetry 8s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 11:09:06 crc kubenswrapper[4923]: I1128 11:09:06.892199 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 11:09:06 crc kubenswrapper[4923]: I1128 11:09:06.892257 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 11:09:06 crc kubenswrapper[4923]: E1128 11:09:06.892348 4923 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 11:09:06 crc kubenswrapper[4923]: E1128 11:09:06.892389 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 11:09:14.892379175 +0000 UTC m=+34.021063395 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 11:09:06 crc kubenswrapper[4923]: E1128 11:09:06.892720 4923 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 11:09:06 crc kubenswrapper[4923]: E1128 11:09:06.892764 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 11:09:14.892754465 +0000 UTC m=+34.021438685 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 11:09:06 crc kubenswrapper[4923]: I1128 11:09:06.915844 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:06 crc kubenswrapper[4923]: I1128 11:09:06.915894 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:06 crc kubenswrapper[4923]: I1128 11:09:06.915910 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:06 crc kubenswrapper[4923]: I1128 11:09:06.915951 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:06 crc kubenswrapper[4923]: I1128 11:09:06.915969 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:06Z","lastTransitionTime":"2025-11-28T11:09:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:06 crc kubenswrapper[4923]: I1128 11:09:06.993419 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 11:09:06 crc kubenswrapper[4923]: I1128 11:09:06.993477 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 11:09:06 crc kubenswrapper[4923]: E1128 11:09:06.993667 4923 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 11:09:06 crc kubenswrapper[4923]: E1128 11:09:06.993688 4923 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 11:09:06 crc kubenswrapper[4923]: E1128 11:09:06.993691 4923 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 11:09:06 crc kubenswrapper[4923]: E1128 11:09:06.993747 4923 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 11:09:06 crc kubenswrapper[4923]: E1128 11:09:06.993702 4923 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod 
openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 11:09:06 crc kubenswrapper[4923]: E1128 11:09:06.993767 4923 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 11:09:06 crc kubenswrapper[4923]: E1128 11:09:06.993826 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-28 11:09:14.993810453 +0000 UTC m=+34.122494673 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 11:09:06 crc kubenswrapper[4923]: E1128 11:09:06.993846 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-28 11:09:14.993837933 +0000 UTC m=+34.122522163 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 11:09:07 crc kubenswrapper[4923]: I1128 11:09:07.018229 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:07 crc kubenswrapper[4923]: I1128 11:09:07.018270 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:07 crc kubenswrapper[4923]: I1128 11:09:07.018283 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:07 crc kubenswrapper[4923]: I1128 11:09:07.018302 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:07 crc kubenswrapper[4923]: I1128 11:09:07.018315 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:07Z","lastTransitionTime":"2025-11-28T11:09:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:07 crc kubenswrapper[4923]: I1128 11:09:07.121324 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:07 crc kubenswrapper[4923]: I1128 11:09:07.121777 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:07 crc kubenswrapper[4923]: I1128 11:09:07.122181 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:07 crc kubenswrapper[4923]: I1128 11:09:07.122536 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:07 crc kubenswrapper[4923]: I1128 11:09:07.122978 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:07Z","lastTransitionTime":"2025-11-28T11:09:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:07 crc kubenswrapper[4923]: I1128 11:09:07.168028 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 11:09:07 crc kubenswrapper[4923]: I1128 11:09:07.168101 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 11:09:07 crc kubenswrapper[4923]: E1128 11:09:07.168198 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 11:09:07 crc kubenswrapper[4923]: I1128 11:09:07.168275 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 11:09:07 crc kubenswrapper[4923]: E1128 11:09:07.168368 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 11:09:07 crc kubenswrapper[4923]: E1128 11:09:07.168448 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 11:09:07 crc kubenswrapper[4923]: I1128 11:09:07.226152 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:07 crc kubenswrapper[4923]: I1128 11:09:07.226202 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:07 crc kubenswrapper[4923]: I1128 11:09:07.226225 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:07 crc kubenswrapper[4923]: I1128 11:09:07.226257 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:07 crc kubenswrapper[4923]: I1128 11:09:07.226283 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:07Z","lastTransitionTime":"2025-11-28T11:09:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:07 crc kubenswrapper[4923]: I1128 11:09:07.335363 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:07 crc kubenswrapper[4923]: I1128 11:09:07.335421 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:07 crc kubenswrapper[4923]: I1128 11:09:07.335441 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:07 crc kubenswrapper[4923]: I1128 11:09:07.335465 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:07 crc kubenswrapper[4923]: I1128 11:09:07.335484 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:07Z","lastTransitionTime":"2025-11-28T11:09:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:07 crc kubenswrapper[4923]: I1128 11:09:07.366425 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" event={"ID":"08e03349-56fc-4b2d-93d3-cf2405a4b7ad","Type":"ContainerStarted","Data":"18927b205b4749967b7844a91e7f60621e025df765f43689d07e9d95e0758f35"} Nov 28 11:09:07 crc kubenswrapper[4923]: I1128 11:09:07.370488 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-9gjj9" event={"ID":"b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093","Type":"ContainerStarted","Data":"b5d7899933378350cf0b863d44216aa3d87b7343f144dcab3470ee44370de0a9"} Nov 28 11:09:07 crc kubenswrapper[4923]: I1128 11:09:07.390307 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"faf07f1a-1aa1-4e4a-b93d-739f0a9f1012\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f7b3757e1d1a5295909db644a475e35e9f9826cd7382a5a3eba86b4a76ac04d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f83e92b35264fccdd516d857e5a574a7156f7615b643691b6f8694daa38089b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8841f44f1d4af0e73960ce1c7ac5a4da352f85f6b3637315faa716d853be3277\\\",\\\"image\\\":\\\"quay.io/crcont/opens
hift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc960423fd7ee0a6231020982f5b932a6a2d7d0515d6f6df503d6c5d51b82096\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:07Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:07 crc kubenswrapper[4923]: I1128 11:09:07.407197 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-h5s2m" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"84374038-67ce-4dc0-a2c2-6eed9650c604\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://addcc8dd720a66b5089f7fa541a454de2be862cc524d1f8e4c948059ef70e20f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8z7ts\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-h5s2m\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:07Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:07 crc kubenswrapper[4923]: I1128 11:09:07.425073 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c83fada-ddb5-4acd-99c4-74d9f42e6250\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eece6b2154126c64202c6cb5a8b2953275ed2dc75e76fef6aaf2c4b82a1979f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28093276aebb4751d979649c4ced86f500308d0d4dde397771c0e1e968250ec8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28ae91e6197ea506c337abdbce14a048856e6bda9b35c5de922904c26bc96a54\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.i
o/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb7df64556e877b9dd56be5e97103abc8aa8b28a43b4a5389d0f6e2489057cf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc06f87c8ea0744810e2b9cb7ff8bb529fc1b2133ab79d12eb8e6129accd3e18\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"-12-28 11:08:43 +0000 UTC (now=2025-11-28 11:08:59.275700323 +0000 UTC))\\\\\\\"\\\\nI1128 11:08:59.275749 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1128 11:08:59.275786 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1128 11:08:59.275797 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI1128 11:08:59.275809 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1128 11:08:59.275835 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1128 11:08:59.275852 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764328134\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764328133\\\\\\\\\\\\\\\" (2025-11-28 10:08:53 +0000 UTC to 2026-11-28 10:08:53 +0000 UTC (now=2025-11-28 11:08:59.275832266 +0000 UTC))\\\\\\\"\\\\nI1128 11:08:59.275869 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1128 11:08:59.275889 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1128 11:08:59.275902 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1128 11:08:59.275909 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1128 11:08:59.275921 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1128 11:08:59.275909 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2723273528/tls.crt::/tmp/serving-cert-2723273528/tls.key\\\\\\\"\\\\nF1128 11:08:59.278169 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:43Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c6f085f1fd5a1ed6abe0727d6a94c95fb1b97a9f00a0dc157f62f68698c25ba9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:07Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:07 crc kubenswrapper[4923]: I1128 11:09:07.437242 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:07 crc kubenswrapper[4923]: I1128 11:09:07.437274 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:07 crc kubenswrapper[4923]: I1128 11:09:07.437287 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:07 crc kubenswrapper[4923]: I1128 11:09:07.437305 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:07 crc kubenswrapper[4923]: I1128 11:09:07.437318 4923 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:07Z","lastTransitionTime":"2025-11-28T11:09:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:07 crc kubenswrapper[4923]: I1128 11:09:07.441323 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c1e1dcf5efd54a3e3546460813ddc68dae027e669a19eeef6af7246b385ed21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:07Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:07 crc kubenswrapper[4923]: I1128 11:09:07.455104 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:07Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:07 crc kubenswrapper[4923]: I1128 11:09:07.472240 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:07Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:07 crc kubenswrapper[4923]: I1128 11:09:07.485562 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:07Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:07 crc kubenswrapper[4923]: I1128 11:09:07.499048 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"092566f7-fc7d-4897-a1f2-4ecedcd3058e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5e3ad6f76cbc3a3e771dc55c8711f153c18c1c96798a89e0f20b1ff06041129c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nthb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e0494fbf37786a6c8b1524ab2642c29343c3cfef308a6f0988d59f375d732a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\
\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nthb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-bwdth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:07Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:07 crc kubenswrapper[4923]: I1128 11:09:07.521210 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-68dth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:07Z 
is after 2025-08-24T17:21:41Z" Nov 28 11:09:07 crc kubenswrapper[4923]: I1128 11:09:07.532256 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-766k2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69fcf39a-3416-4733-a55a-043d5286f8ac\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14683c7234bd497157ffe1097cd1eee097e5dd0a9e53a3e39813bc75890961b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dnr6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-766k2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:07Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:07 crc kubenswrapper[4923]: I1128 11:09:07.539563 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:07 crc kubenswrapper[4923]: I1128 11:09:07.539619 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:07 crc kubenswrapper[4923]: I1128 11:09:07.539636 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:07 crc kubenswrapper[4923]: I1128 11:09:07.539660 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:07 crc kubenswrapper[4923]: I1128 11:09:07.539679 4923 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:07Z","lastTransitionTime":"2025-11-28T11:09:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:07 crc kubenswrapper[4923]: I1128 11:09:07.546385 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdcd87eab93f0216a48bbd6038ca2bc510b7b36f895bf66de15084be62a9a0e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa3a1d3e4297edce49cfd44925fbd1cb0d51752581df9a406042cc1da6f87121\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:07Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:07 crc kubenswrapper[4923]: I1128 11:09:07.559041 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0d288688a32f135820030d0816b0e9567100a4732e99c41c8b7f05374c8251f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:07Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:07 crc kubenswrapper[4923]: I1128 11:09:07.574522 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-9gjj9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b5d7899933378350cf0b863d44216aa3d87b7343f144dcab3470ee44370de0a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27143610133e2bc3e2aa453a394a9f65fcdeb97a45221a239dd490029e5a3184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://27143610133e2bc3e2aa453a394a9f65fcdeb97a45221a239dd490029e5a3184\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79f89c182f50622044f3978965cb214c601f6de4cddc96eaa118f532b2864276\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://79f89c182f50622044f3978965cb214c601f6de4cddc96eaa118f532b2864276\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7996a8b1d06ca35a2ee6c89edc2eaa7e45a6084ab54ff0caaa091c763d3cd47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b7996a8b1d06ca35a2ee6c89edc2eaa7e45a6084ab54ff0caaa091c763d3cd47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62d8385e1aa47815f9084d28d70dae899c80019ce59f5725455c594a31c97f22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62d8385e1aa47815f9084d28d70dae899c80019ce59f5725455c594a31c97f22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f6b2e1bc9f8f538d0973d9b1726d2c105d61fcd559df3ab8a2ec77b2d8f44a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f6b2e1bc9f8f538d0973d9b1726d2c105d61fcd559df3ab8a2ec77b2d8f44a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a368daf98912d176b66d5aba37e5e91937fbee8c7bd7ce6658993668c8e1525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9a368daf98912d176b66d5aba37e5e91937fbee8c7bd7ce6658993668c8e1525\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-9gjj9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:07Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:07 crc kubenswrapper[4923]: I1128 11:09:07.590029 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9qvkm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf32d1c9-4639-48a9-b972-c9ad6daec543\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee259c68571ed9e58d29ab09558dea3cdcc89ebfb898d6f27e896cb0d80665bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnwc6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:03Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9qvkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:07Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:07 crc kubenswrapper[4923]: I1128 11:09:07.642061 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:07 crc kubenswrapper[4923]: I1128 11:09:07.642112 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:07 crc kubenswrapper[4923]: I1128 11:09:07.642129 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:07 crc kubenswrapper[4923]: I1128 11:09:07.642151 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:07 crc kubenswrapper[4923]: I1128 11:09:07.642170 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:07Z","lastTransitionTime":"2025-11-28T11:09:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 11:09:07 crc kubenswrapper[4923]: I1128 11:09:07.744406 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 11:09:07 crc kubenswrapper[4923]: I1128 11:09:07.744449 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 11:09:07 crc kubenswrapper[4923]: I1128 11:09:07.744460 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 11:09:07 crc kubenswrapper[4923]: I1128 11:09:07.744476 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 11:09:07 crc kubenswrapper[4923]: I1128 11:09:07.744487 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:07Z","lastTransitionTime":"2025-11-28T11:09:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 11:09:07 crc kubenswrapper[4923]: I1128 11:09:07.847110 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 11:09:07 crc kubenswrapper[4923]: I1128 11:09:07.847143 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 11:09:07 crc kubenswrapper[4923]: I1128 11:09:07.847152 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 11:09:07 crc kubenswrapper[4923]: I1128 11:09:07.847167 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 11:09:07 crc kubenswrapper[4923]: I1128 11:09:07.847179 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:07Z","lastTransitionTime":"2025-11-28T11:09:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 11:09:07 crc kubenswrapper[4923]: I1128 11:09:07.950281 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 11:09:07 crc kubenswrapper[4923]: I1128 11:09:07.950339 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 11:09:07 crc kubenswrapper[4923]: I1128 11:09:07.950356 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 11:09:07 crc kubenswrapper[4923]: I1128 11:09:07.950381 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 11:09:07 crc kubenswrapper[4923]: I1128 11:09:07.950403 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:07Z","lastTransitionTime":"2025-11-28T11:09:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 11:09:08 crc kubenswrapper[4923]: I1128 11:09:08.052886 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 11:09:08 crc kubenswrapper[4923]: I1128 11:09:08.052967 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 11:09:08 crc kubenswrapper[4923]: I1128 11:09:08.052985 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 11:09:08 crc kubenswrapper[4923]: I1128 11:09:08.053010 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 11:09:08 crc kubenswrapper[4923]: I1128 11:09:08.053027 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:08Z","lastTransitionTime":"2025-11-28T11:09:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 11:09:08 crc kubenswrapper[4923]: I1128 11:09:08.155256 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 11:09:08 crc kubenswrapper[4923]: I1128 11:09:08.155314 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 11:09:08 crc kubenswrapper[4923]: I1128 11:09:08.155332 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 11:09:08 crc kubenswrapper[4923]: I1128 11:09:08.155357 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 11:09:08 crc kubenswrapper[4923]: I1128 11:09:08.155375 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:08Z","lastTransitionTime":"2025-11-28T11:09:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 11:09:08 crc kubenswrapper[4923]: I1128 11:09:08.258088 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 11:09:08 crc kubenswrapper[4923]: I1128 11:09:08.258144 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 11:09:08 crc kubenswrapper[4923]: I1128 11:09:08.258161 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 11:09:08 crc kubenswrapper[4923]: I1128 11:09:08.258184 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 11:09:08 crc kubenswrapper[4923]: I1128 11:09:08.258201 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:08Z","lastTransitionTime":"2025-11-28T11:09:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 11:09:08 crc kubenswrapper[4923]: I1128 11:09:08.361364 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 11:09:08 crc kubenswrapper[4923]: I1128 11:09:08.361664 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 11:09:08 crc kubenswrapper[4923]: I1128 11:09:08.361682 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 11:09:08 crc kubenswrapper[4923]: I1128 11:09:08.361707 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 11:09:08 crc kubenswrapper[4923]: I1128 11:09:08.361729 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:08Z","lastTransitionTime":"2025-11-28T11:09:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:08 crc kubenswrapper[4923]: I1128 11:09:08.373856 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" Nov 28 11:09:08 crc kubenswrapper[4923]: I1128 11:09:08.373906 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" Nov 28 11:09:08 crc kubenswrapper[4923]: I1128 11:09:08.395891 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdcd87eab93f0216a48bbd6038ca2bc510b7b36f895bf66de15084be62a9a0e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa3a1d3e4297edce49cfd44925fbd1cb0d51752581df9a406042cc1da6f87121\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed 
to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:08Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:08 crc kubenswrapper[4923]: I1128 11:09:08.413572 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" Nov 28 11:09:08 crc kubenswrapper[4923]: I1128 11:09:08.417984 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0d288688a32f135820030d0816b0e9567100a4732e99c41c8b7f05374c8251f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:08Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:08 crc kubenswrapper[4923]: I1128 11:09:08.418406 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" Nov 28 11:09:08 crc kubenswrapper[4923]: I1128 11:09:08.442607 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-9gjj9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b5d7899933378350cf0b863d44216aa3d87b7343f144dcab3470ee44370de0a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27143610133e2bc3e2aa453a394a9f65fcdeb97a45221a239dd490029e5a3184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://27143610133e2bc3e2aa453a394a9f65fcdeb97a45221a239dd490029e5a3184\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79f89c182f50622044f3978965cb214c601f6de4cddc96eaa118f532b2864276\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://79f89c182f50622044f3978965cb214c601f6de4cddc96eaa118f532b2864276\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7996a8b1d06ca35a2ee6c89edc2eaa7e45a6084ab54ff0caaa091c763d3cd47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b7996a8b1d06ca35a2ee6c89edc2eaa7e45a6084ab54ff0caaa091c763d3cd47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62d8385e1aa47815f9084d28d70dae899c80019ce59f5725455c594a31c97f22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62d8385e1aa47815f9084d28d70dae899c80019ce59f5725455c594a31c97f22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f6b2e1bc9f8f538d0973d9b1726d2c105d61fcd559df3ab8a2ec77b2d8f44a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f6b2e1bc9f8f538d0973d9b1726d2c105d61fcd559df3ab8a2ec77b2d8f44a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a368daf98912d176b66d5aba37e5e91937fbee8c7bd7ce6658993668c8e1525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9a368daf98912d176b66d5aba37e5e91937fbee8c7bd7ce6658993668c8e1525\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-9gjj9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:08Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:08 crc kubenswrapper[4923]: I1128 11:09:08.460484 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9qvkm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf32d1c9-4639-48a9-b972-c9ad6daec543\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee259c68571ed9e58d29ab09558dea3cdcc89ebfb898d6f27e896cb0d80665bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnwc6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:03Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9qvkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:08Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:08 crc kubenswrapper[4923]: I1128 11:09:08.465441 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:08 crc kubenswrapper[4923]: I1128 11:09:08.465512 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:08 crc kubenswrapper[4923]: I1128 11:09:08.465530 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:08 crc kubenswrapper[4923]: I1128 11:09:08.465554 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:08 crc kubenswrapper[4923]: I1128 11:09:08.465571 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:08Z","lastTransitionTime":"2025-11-28T11:09:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:08 crc kubenswrapper[4923]: I1128 11:09:08.483335 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"faf07f1a-1aa1-4e4a-b93d-739f0a9f1012\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f7b3757e1d1a5295909db644a475e35e9f9826cd7382a5a3eba86b4a76ac04d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f83e92b35264fccdd516d857e5a574a7156f7615b643691b6f8694daa38089b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8841f44f1d4af0e73960ce1c7ac5a4da352f85f6b3637315faa716d853be3277\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42
Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc960423fd7ee0a6231020982f5b932a6a2d7d0515d6f6df503d6c5d51b82096\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:08Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:08 crc kubenswrapper[4923]: I1128 11:09:08.503610 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-h5s2m" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"84374038-67ce-4dc0-a2c2-6eed9650c604\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://addcc8dd720a66b5089f7fa541a454de2be862cc524d1f8e4c948059ef70e20f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8z7ts\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-h5s2m\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:08Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:08 crc kubenswrapper[4923]: I1128 11:09:08.542648 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ee3c047cb59b98c8394618e6194fc477b983a7039581951378c69698b307ee7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3c01dc5b138b3d245898dd4a01c5e81350afe6fabfe9e0333589cd9439d4017\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":t
rue,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88bb4ac52c4706ca3d80080efb31eff071b89651d1a474b4c0c11ed5559ee7a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7b206747c810fe48a3d4269cdf80dce693f2d075510aabb42ef2c6dbbea97e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7489bfb225a27d96b70124820fb1924580c08b3355ef948335f881d7646a8a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\
\\":\\\"cri-o://4bc7c6e0b076f04ba7810c82578147a9a3af59d3393e8effb111c299583aa6de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18927b205b4749967b7844a91e7f60621e025df765f43689d07e9d95e0758f35\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\
\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c2e3f2c83ec1b586a9478fb8d23caccab36a0fe08a3f0907a7b0cb2e67af65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-68dth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:08Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:08 crc kubenswrapper[4923]: I1128 11:09:08.565421 4923 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c83fada-ddb5-4acd-99c4-74d9f42e6250\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eece6b2154126c64202c6cb5a8b2953275ed2dc75e76fef6aaf2c4b82a1979f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28093276aebb4751d979649c4ced86f500308d0d4dde397771c0e1e968250ec8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28ae91e6197ea506c337abdbce14a048856e6bda9b35c5de922904c26bc96a54\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc
/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb7df64556e877b9dd56be5e97103abc8aa8b28a43b4a5389d0f6e2489057cf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc06f87c8ea0744810e2b9cb7ff8bb529fc1b2133ab79d12eb8e6129accd3e18\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"-12-28 11:08:43 +0000 UTC (now=2025-11-28 11:08:59.275700323 +0000 UTC))\\\\\\\"\\\\nI1128 11:08:59.275749 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1128 11:08:59.275786 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1128 11:08:59.275797 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI1128 11:08:59.275809 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1128 11:08:59.275835 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1128 11:08:59.275852 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764328134\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764328133\\\\\\\\\\\\\\\" (2025-11-28 10:08:53 +0000 UTC to 2026-11-28 10:08:53 +0000 UTC (now=2025-11-28 11:08:59.275832266 +0000 UTC))\\\\\\\"\\\\nI1128 11:08:59.275869 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1128 11:08:59.275889 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1128 11:08:59.275902 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1128 11:08:59.275909 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1128 11:08:59.275921 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1128 11:08:59.275909 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2723273528/tls.crt::/tmp/serving-cert-2723273528/tls.key\\\\\\\"\\\\nF1128 11:08:59.278169 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:43Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c6f085f1fd5a1ed6abe0727d6a94c95fb1b97a9f00a0dc157f62f68698c25ba9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:08Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:08 crc kubenswrapper[4923]: I1128 11:09:08.568581 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:08 crc kubenswrapper[4923]: I1128 11:09:08.568645 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:08 crc kubenswrapper[4923]: I1128 11:09:08.568663 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:08 crc kubenswrapper[4923]: I1128 11:09:08.568688 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:08 crc kubenswrapper[4923]: I1128 11:09:08.568705 4923 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:08Z","lastTransitionTime":"2025-11-28T11:09:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:08 crc kubenswrapper[4923]: I1128 11:09:08.587546 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c1e1dcf5efd54a3e3546460813ddc68dae027e669a19eeef6af7246b385ed21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:08Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:08 crc kubenswrapper[4923]: I1128 11:09:08.607424 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:08Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:08 crc kubenswrapper[4923]: I1128 11:09:08.627111 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:08Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:08 crc kubenswrapper[4923]: I1128 11:09:08.651541 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:08Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:08 crc kubenswrapper[4923]: I1128 11:09:08.672066 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:08 crc kubenswrapper[4923]: I1128 11:09:08.672131 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:08 crc kubenswrapper[4923]: I1128 11:09:08.672149 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:08 crc kubenswrapper[4923]: I1128 11:09:08.672176 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:08 crc kubenswrapper[4923]: I1128 11:09:08.672193 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:08Z","lastTransitionTime":"2025-11-28T11:09:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:08 crc kubenswrapper[4923]: I1128 11:09:08.674412 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"092566f7-fc7d-4897-a1f2-4ecedcd3058e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5e3ad6f76cbc3a3e771dc55c8711f153c18c1c96798a89e0f20b1ff06041129c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nthb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e0494fbf37786a6c8b1524ab2642c29343c3cfef308a6f0988d59f375d732a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nthb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-bwdth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:08Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:08 crc kubenswrapper[4923]: I1128 11:09:08.688592 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-766k2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69fcf39a-3416-4733-a55a-043d5286f8ac\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14683c7234bd497157ffe1097cd1eee097e5dd0a9e53a3e39813bc75890961b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dnr6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-766k2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:08Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:08 crc kubenswrapper[4923]: I1128 11:09:08.708793 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c83fada-ddb5-4acd-99c4-74d9f42e6250\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eece6b2154126c64202c6cb5a8b2953275ed2dc75e76fef6aaf2c4b82a1979f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28093276aebb4751d979649c4ced86f500308d0d4dde397771c0e1e968250ec8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28ae91e6197ea506c337abdbce14a048856e6bda9b35c5de922904c26bc96a54\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb7df64556e877b9dd56be5e97103abc8aa8b28a43b4a5389d0f6e2489057cf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc06f87c8ea0744810e2b9cb7ff8bb529fc1b2133ab79d12eb8e6129accd3e18\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"-12-28 11:08:43 +0000 UTC (now=2025-11-28 11:08:59.275700323 +0000 UTC))\\\\\\\"\\\\nI1128 11:08:59.275749 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1128 11:08:59.275786 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1128 11:08:59.275797 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI1128 11:08:59.275809 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1128 11:08:59.275835 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1128 11:08:59.275852 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764328134\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764328133\\\\\\\\\\\\\\\" (2025-11-28 10:08:53 +0000 UTC to 2026-11-28 10:08:53 +0000 UTC (now=2025-11-28 11:08:59.275832266 +0000 UTC))\\\\\\\"\\\\nI1128 11:08:59.275869 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1128 11:08:59.275889 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1128 11:08:59.275902 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1128 11:08:59.275909 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1128 11:08:59.275921 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1128 11:08:59.275909 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2723273528/tls.crt::/tmp/serving-cert-2723273528/tls.key\\\\\\\"\\\\nF1128 11:08:59.278169 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:43Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c6f085f1fd5a1ed6abe0727d6a94c95fb1b97a9f00a0dc157f62f68698c25ba9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:08Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:08 crc kubenswrapper[4923]: I1128 11:09:08.726278 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c1e1dcf5efd54a3e3546460813ddc68dae027e669a19eeef6af7246b385ed21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:08Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:08 crc kubenswrapper[4923]: I1128 11:09:08.741134 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:08Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:08 crc kubenswrapper[4923]: I1128 11:09:08.759851 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:08Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:08 crc kubenswrapper[4923]: I1128 11:09:08.773640 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:08Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:08 crc kubenswrapper[4923]: I1128 11:09:08.775155 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:08 crc kubenswrapper[4923]: I1128 11:09:08.775198 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:08 crc kubenswrapper[4923]: I1128 11:09:08.775209 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:08 crc kubenswrapper[4923]: I1128 11:09:08.775226 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:08 crc kubenswrapper[4923]: I1128 11:09:08.775238 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:08Z","lastTransitionTime":"2025-11-28T11:09:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:08 crc kubenswrapper[4923]: I1128 11:09:08.786227 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"092566f7-fc7d-4897-a1f2-4ecedcd3058e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5e3ad6f76cbc3a3e771dc55c8711f153c18c1c96798a89e0f20b1ff06041129c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nthb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e0494fbf37786a6c8b1524ab2642c29343c3cfef308a6f0988d59f375d732a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nthb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-bwdth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:08Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:08 crc kubenswrapper[4923]: I1128 11:09:08.808030 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ee3c047cb59b98c8394618e6194fc477b983a7039581951378c69698b307ee7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3c01dc5b138b3d245898dd4a01c5e81350afe6fabfe9e0333589cd9439d4017\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kuber
netes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88bb4ac52c4706ca3d80080efb31eff071b89651d1a474b4c0c11ed5559ee7a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7b206747c810fe48a3d4269cdf80dce693f2d075510aabb42ef2c6dbbea97e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7489bfb225a27d96b70124820fb1924580c08b3355ef948335f881d7646a8a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bc7c6e0b076f04ba7810c82578147a9a3af59d3393e8effb111c299583aa6de\\\",\\\"image
\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18927b205b4749967b7844a91e7f60621e025df765f43689d07e9d95e0758f35\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath
\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c2e3f2c83ec1b586a9478fb8d23caccab36a0fe08a3f0907a7b0cb2e67af65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-68dth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:08Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:08 crc kubenswrapper[4923]: I1128 11:09:08.819658 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-766k2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"69fcf39a-3416-4733-a55a-043d5286f8ac\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14683c7234bd497157ffe1097cd1eee097e5dd0a9e53a3e39813bc75890961b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dnr6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-766k2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:08Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:08 crc kubenswrapper[4923]: I1128 11:09:08.838841 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdcd87eab93f0216a48bbd6038ca2bc510b7b36f895bf66de15084be62a9a0e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa3a1d3e4297edce49cfd44925fbd1cb0d51752581df9a406042cc1da6f87121\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:08Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:08 crc kubenswrapper[4923]: I1128 11:09:08.852719 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0d288688a32f135820030d0816b0e9567100a4732e99c41c8b7f05374c8251f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:08Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:08 crc kubenswrapper[4923]: I1128 11:09:08.870316 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-9gjj9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b5d7899933378350cf0b863d44216aa3d87b7343f144dcab3470ee44370de0a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27143610133e2bc3e2aa453a394a9f65fcdeb97a45221a239dd490029e5a3184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://27143610133e2bc3e2aa453a394a9f65fcdeb97a45221a239dd490029e5a3184\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79f89c182f50622044f3978965cb214c601f6de4cddc96eaa118f532b2864276\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://79f89c182f50622044f3978965cb214c601f6de4cddc96eaa118f532b2864276\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7996a8b1d06ca35a2ee6c89edc2eaa7e45a6084ab54ff0caaa091c763d3cd47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b7996a8b1d06ca35a2ee6c89edc2eaa7e45a6084ab54ff0caaa091c763d3cd47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62d8385e1aa47815f9084d28d70dae899c80019ce59f5725455c594a31c97f22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62d8385e1aa47815f9084d28d70dae899c80019ce59f5725455c594a31c97f22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f6b2e1bc9f8f538d0973d9b1726d2c105d61fcd559df3ab8a2ec77b2d8f44a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f6b2e1bc9f8f538d0973d9b1726d2c105d61fcd559df3ab8a2ec77b2d8f44a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a368daf98912d176b66d5aba37e5e91937fbee8c7bd7ce6658993668c8e1525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9a368daf98912d176b66d5aba37e5e91937fbee8c7bd7ce6658993668c8e1525\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-9gjj9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:08Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:08 crc kubenswrapper[4923]: I1128 11:09:08.877665 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:08 crc kubenswrapper[4923]: I1128 11:09:08.877728 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:08 crc 
kubenswrapper[4923]: I1128 11:09:08.877748 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:08 crc kubenswrapper[4923]: I1128 11:09:08.877772 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:08 crc kubenswrapper[4923]: I1128 11:09:08.877789 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:08Z","lastTransitionTime":"2025-11-28T11:09:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:08 crc kubenswrapper[4923]: I1128 11:09:08.886889 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9qvkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf32d1c9-4639-48a9-b972-c9ad6daec543\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee259c68571ed9e58d29ab09558dea3cdcc89ebfb898d6f27e896cb0d80665bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnwc6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:03Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9qvkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:08Z is after 2025-08-24T17:21:41Z" Nov 
28 11:09:08 crc kubenswrapper[4923]: I1128 11:09:08.902714 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"faf07f1a-1aa1-4e4a-b93d-739f0a9f1012\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f7b3757e1d1a5295909db644a475e35e9f9826cd7382a5a3eba86b4a76ac04d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f83e92b35264fccdd516d857e5a574a7156f7615b643691b6f8694daa38089b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8841f44f1d4af0e73960ce1c7ac5a4da352f85f6b3637315faa716d853be3277\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs
\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc960423fd7ee0a6231020982f5b932a6a2d7d0515d6f6df503d6c5d51b82096\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:08Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:08 crc kubenswrapper[4923]: I1128 11:09:08.929240 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-h5s2m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"84374038-67ce-4dc0-a2c2-6eed9650c604\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://addcc8dd720a66b5089f7fa541a454de2be862cc524d1f8e4c948059ef70e20f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"
system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8z7ts\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-h5s2m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:08Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:08 crc kubenswrapper[4923]: I1128 11:09:08.980203 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:08 crc kubenswrapper[4923]: I1128 11:09:08.980246 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:08 crc kubenswrapper[4923]: I1128 11:09:08.980257 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:08 crc kubenswrapper[4923]: I1128 11:09:08.980276 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:08 crc kubenswrapper[4923]: I1128 11:09:08.980288 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:08Z","lastTransitionTime":"2025-11-28T11:09:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:09 crc kubenswrapper[4923]: I1128 11:09:09.082085 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:09 crc kubenswrapper[4923]: I1128 11:09:09.082117 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:09 crc kubenswrapper[4923]: I1128 11:09:09.082125 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:09 crc kubenswrapper[4923]: I1128 11:09:09.082137 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:09 crc kubenswrapper[4923]: I1128 11:09:09.082145 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:09Z","lastTransitionTime":"2025-11-28T11:09:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:09 crc kubenswrapper[4923]: I1128 11:09:09.168129 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 11:09:09 crc kubenswrapper[4923]: I1128 11:09:09.168158 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 11:09:09 crc kubenswrapper[4923]: I1128 11:09:09.168173 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 11:09:09 crc kubenswrapper[4923]: E1128 11:09:09.168244 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 11:09:09 crc kubenswrapper[4923]: E1128 11:09:09.168556 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 11:09:09 crc kubenswrapper[4923]: E1128 11:09:09.168599 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 11:09:09 crc kubenswrapper[4923]: I1128 11:09:09.184488 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:09 crc kubenswrapper[4923]: I1128 11:09:09.184521 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:09 crc kubenswrapper[4923]: I1128 11:09:09.184530 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:09 crc kubenswrapper[4923]: I1128 11:09:09.184545 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:09 crc kubenswrapper[4923]: I1128 11:09:09.184554 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:09Z","lastTransitionTime":"2025-11-28T11:09:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:09 crc kubenswrapper[4923]: I1128 11:09:09.286194 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:09 crc kubenswrapper[4923]: I1128 11:09:09.286232 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:09 crc kubenswrapper[4923]: I1128 11:09:09.286241 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:09 crc kubenswrapper[4923]: I1128 11:09:09.286257 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:09 crc kubenswrapper[4923]: I1128 11:09:09.286266 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:09Z","lastTransitionTime":"2025-11-28T11:09:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:09 crc kubenswrapper[4923]: I1128 11:09:09.375920 4923 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 28 11:09:09 crc kubenswrapper[4923]: I1128 11:09:09.388622 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:09 crc kubenswrapper[4923]: I1128 11:09:09.388644 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:09 crc kubenswrapper[4923]: I1128 11:09:09.388653 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:09 crc kubenswrapper[4923]: I1128 11:09:09.388665 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:09 crc kubenswrapper[4923]: I1128 11:09:09.388674 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:09Z","lastTransitionTime":"2025-11-28T11:09:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:09 crc kubenswrapper[4923]: I1128 11:09:09.490801 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:09 crc kubenswrapper[4923]: I1128 11:09:09.490854 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:09 crc kubenswrapper[4923]: I1128 11:09:09.490867 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:09 crc kubenswrapper[4923]: I1128 11:09:09.490890 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:09 crc kubenswrapper[4923]: I1128 11:09:09.490908 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:09Z","lastTransitionTime":"2025-11-28T11:09:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:09 crc kubenswrapper[4923]: I1128 11:09:09.511347 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:09 crc kubenswrapper[4923]: I1128 11:09:09.511380 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:09 crc kubenswrapper[4923]: I1128 11:09:09.511390 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:09 crc kubenswrapper[4923]: I1128 11:09:09.511403 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:09 crc kubenswrapper[4923]: I1128 11:09:09.511412 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:09Z","lastTransitionTime":"2025-11-28T11:09:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:09 crc kubenswrapper[4923]: E1128 11:09:09.526800 4923 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:09Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:09Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f69ffe27-00d5-45aa-bb63-00075a21e0c7\\\",\\\"systemUUID\\\":\\\"bb6b4e53-d23a-4517-9d50-b05bdc3da8e4\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:09Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:09 crc kubenswrapper[4923]: I1128 11:09:09.536571 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:09 crc kubenswrapper[4923]: I1128 11:09:09.536623 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 11:09:09 crc kubenswrapper[4923]: I1128 11:09:09.536633 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:09 crc kubenswrapper[4923]: I1128 11:09:09.536661 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:09 crc kubenswrapper[4923]: I1128 11:09:09.536674 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:09Z","lastTransitionTime":"2025-11-28T11:09:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:09 crc kubenswrapper[4923]: E1128 11:09:09.550967 4923 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:09Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:09Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f69ffe27-00d5-45aa-bb63-00075a21e0c7\\\",\\\"systemUUID\\\":\\\"bb6b4e53-d23a-4517-9d50-b05bdc3da8e4\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:09Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:09 crc kubenswrapper[4923]: I1128 11:09:09.554263 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:09 crc kubenswrapper[4923]: I1128 11:09:09.554287 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 11:09:09 crc kubenswrapper[4923]: I1128 11:09:09.554296 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:09 crc kubenswrapper[4923]: I1128 11:09:09.554317 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:09 crc kubenswrapper[4923]: I1128 11:09:09.554328 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:09Z","lastTransitionTime":"2025-11-28T11:09:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:09 crc kubenswrapper[4923]: E1128 11:09:09.573697 4923 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:09Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:09Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f69ffe27-00d5-45aa-bb63-00075a21e0c7\\\",\\\"systemUUID\\\":\\\"bb6b4e53-d23a-4517-9d50-b05bdc3da8e4\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:09Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:09 crc kubenswrapper[4923]: I1128 11:09:09.578822 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:09 crc kubenswrapper[4923]: I1128 11:09:09.578860 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 11:09:09 crc kubenswrapper[4923]: I1128 11:09:09.578873 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:09 crc kubenswrapper[4923]: I1128 11:09:09.578890 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:09 crc kubenswrapper[4923]: I1128 11:09:09.578902 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:09Z","lastTransitionTime":"2025-11-28T11:09:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:09 crc kubenswrapper[4923]: E1128 11:09:09.593651 4923 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:09Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:09Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f69ffe27-00d5-45aa-bb63-00075a21e0c7\\\",\\\"systemUUID\\\":\\\"bb6b4e53-d23a-4517-9d50-b05bdc3da8e4\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:09Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:09 crc kubenswrapper[4923]: I1128 11:09:09.597029 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:09 crc kubenswrapper[4923]: I1128 11:09:09.597056 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 11:09:09 crc kubenswrapper[4923]: I1128 11:09:09.597084 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:09 crc kubenswrapper[4923]: I1128 11:09:09.597100 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:09 crc kubenswrapper[4923]: I1128 11:09:09.597111 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:09Z","lastTransitionTime":"2025-11-28T11:09:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:09 crc kubenswrapper[4923]: E1128 11:09:09.608478 4923 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:09Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:09Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f69ffe27-00d5-45aa-bb63-00075a21e0c7\\\",\\\"systemUUID\\\":\\\"bb6b4e53-d23a-4517-9d50-b05bdc3da8e4\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:09Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:09 crc kubenswrapper[4923]: E1128 11:09:09.608634 4923 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 28 11:09:09 crc kubenswrapper[4923]: I1128 11:09:09.610257 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 28 11:09:09 crc kubenswrapper[4923]: I1128 11:09:09.610319 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:09 crc kubenswrapper[4923]: I1128 11:09:09.610339 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:09 crc kubenswrapper[4923]: I1128 11:09:09.610366 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:09 crc kubenswrapper[4923]: I1128 11:09:09.610383 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:09Z","lastTransitionTime":"2025-11-28T11:09:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:09 crc kubenswrapper[4923]: I1128 11:09:09.713292 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:09 crc kubenswrapper[4923]: I1128 11:09:09.713341 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:09 crc kubenswrapper[4923]: I1128 11:09:09.713351 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:09 crc kubenswrapper[4923]: I1128 11:09:09.713365 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:09 crc kubenswrapper[4923]: I1128 11:09:09.713374 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:09Z","lastTransitionTime":"2025-11-28T11:09:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:09 crc kubenswrapper[4923]: I1128 11:09:09.816567 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:09 crc kubenswrapper[4923]: I1128 11:09:09.816622 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:09 crc kubenswrapper[4923]: I1128 11:09:09.816640 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:09 crc kubenswrapper[4923]: I1128 11:09:09.816665 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:09 crc kubenswrapper[4923]: I1128 11:09:09.816683 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:09Z","lastTransitionTime":"2025-11-28T11:09:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:09 crc kubenswrapper[4923]: I1128 11:09:09.919318 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:09 crc kubenswrapper[4923]: I1128 11:09:09.919376 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:09 crc kubenswrapper[4923]: I1128 11:09:09.919394 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:09 crc kubenswrapper[4923]: I1128 11:09:09.919419 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:09 crc kubenswrapper[4923]: I1128 11:09:09.919436 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:09Z","lastTransitionTime":"2025-11-28T11:09:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:10 crc kubenswrapper[4923]: I1128 11:09:10.022300 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:10 crc kubenswrapper[4923]: I1128 11:09:10.022353 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:10 crc kubenswrapper[4923]: I1128 11:09:10.022370 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:10 crc kubenswrapper[4923]: I1128 11:09:10.022396 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:10 crc kubenswrapper[4923]: I1128 11:09:10.022414 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:10Z","lastTransitionTime":"2025-11-28T11:09:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:10 crc kubenswrapper[4923]: I1128 11:09:10.126024 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:10 crc kubenswrapper[4923]: I1128 11:09:10.126097 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:10 crc kubenswrapper[4923]: I1128 11:09:10.126115 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:10 crc kubenswrapper[4923]: I1128 11:09:10.126141 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:10 crc kubenswrapper[4923]: I1128 11:09:10.126159 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:10Z","lastTransitionTime":"2025-11-28T11:09:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:10 crc kubenswrapper[4923]: I1128 11:09:10.247399 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:10 crc kubenswrapper[4923]: I1128 11:09:10.247460 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:10 crc kubenswrapper[4923]: I1128 11:09:10.247482 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:10 crc kubenswrapper[4923]: I1128 11:09:10.247513 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:10 crc kubenswrapper[4923]: I1128 11:09:10.247534 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:10Z","lastTransitionTime":"2025-11-28T11:09:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:10 crc kubenswrapper[4923]: I1128 11:09:10.350181 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:10 crc kubenswrapper[4923]: I1128 11:09:10.350229 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:10 crc kubenswrapper[4923]: I1128 11:09:10.350244 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:10 crc kubenswrapper[4923]: I1128 11:09:10.350264 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:10 crc kubenswrapper[4923]: I1128 11:09:10.350280 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:10Z","lastTransitionTime":"2025-11-28T11:09:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:10 crc kubenswrapper[4923]: I1128 11:09:10.381313 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-68dth_08e03349-56fc-4b2d-93d3-cf2405a4b7ad/ovnkube-controller/0.log" Nov 28 11:09:10 crc kubenswrapper[4923]: I1128 11:09:10.389537 4923 generic.go:334] "Generic (PLEG): container finished" podID="08e03349-56fc-4b2d-93d3-cf2405a4b7ad" containerID="18927b205b4749967b7844a91e7f60621e025df765f43689d07e9d95e0758f35" exitCode=1 Nov 28 11:09:10 crc kubenswrapper[4923]: I1128 11:09:10.389596 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" event={"ID":"08e03349-56fc-4b2d-93d3-cf2405a4b7ad","Type":"ContainerDied","Data":"18927b205b4749967b7844a91e7f60621e025df765f43689d07e9d95e0758f35"} Nov 28 11:09:10 crc kubenswrapper[4923]: I1128 11:09:10.390687 4923 scope.go:117] "RemoveContainer" containerID="18927b205b4749967b7844a91e7f60621e025df765f43689d07e9d95e0758f35" Nov 28 11:09:10 crc kubenswrapper[4923]: I1128 11:09:10.413270 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"faf07f1a-1aa1-4e4a-b93d-739f0a9f1012\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f7b3757e1d1a5295909db644a475e35e9f9826cd7382a5a3eba86b4a76ac04d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f83e92b35264fccdd516d857e5a574a7156f7615b643691b6f8694daa38089b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\
\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8841f44f1d4af0e73960ce1c7ac5a4da352f85f6b3637315faa716d853be3277\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc960423fd7ee0a6231020982f5b932a6a2d7d0515d6f6df503d6c5d51b82096\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:10Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:10 crc kubenswrapper[4923]: I1128 11:09:10.453265 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:10 crc kubenswrapper[4923]: I1128 11:09:10.453323 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:10 crc kubenswrapper[4923]: I1128 11:09:10.453342 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:10 crc kubenswrapper[4923]: I1128 11:09:10.453368 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:10 crc kubenswrapper[4923]: I1128 11:09:10.453388 4923 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:10Z","lastTransitionTime":"2025-11-28T11:09:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:10 crc kubenswrapper[4923]: I1128 11:09:10.465536 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-h5s2m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"84374038-67ce-4dc0-a2c2-6eed9650c604\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://addcc8dd720a66b5089f7fa541a454de2be862cc524d1f8e4c948059ef70e20f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube
rnetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8z7ts\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-h5s2m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:10Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:10 crc kubenswrapper[4923]: I1128 11:09:10.499254 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ee3c047cb59b98c8394618e6194fc477b983a7039581951378c69698b307ee7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3c01dc5b138b3d245898dd4a01c5e81350afe6fabfe9e0333589cd9439d4017\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88bb4ac52c4706ca3d80080efb31eff071b89651d1a474b4c0c11ed5559ee7a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7b206747c810fe48a3d4269cdf80dce693f2d075510aabb42ef2c6dbbea97e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7489bfb225a27d96b70124820fb1924580c08b3355ef948335f881d7646a8a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bc7c6e0b076f04ba7810c82578147a9a3af59d3393e8effb111c299583aa6de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18927b205b4749967b7844a91e7f60621e025df7
65f43689d07e9d95e0758f35\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18927b205b4749967b7844a91e7f60621e025df765f43689d07e9d95e0758f35\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T11:09:09Z\\\",\\\"message\\\":\\\".574516 6144 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1128 11:09:09.575176 6144 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1128 11:09:09.574471 6144 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 11:09:09.574693 6144 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1128 11:09:09.574655 6144 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 11:09:09.577244 6144 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1128 11:09:09.577365 6144 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1128 11:09:09.577433 6144 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1128 11:09:09.577518 6144 factory.go:656] Stopping watch factory\\\\nI1128 11:09:09.577576 6144 ovnkube.go:599] Stopped ovnkube\\\\nI1128 11:09:09.577652 6144 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1128 11:09:09.577709 6144 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1128 
11:09:0\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c2e3f2c83ec1b586a9478fb8d23caccab36a0fe08a3f0907a7b0cb2e67af65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0
d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-68dth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:10Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:10 crc kubenswrapper[4923]: I1128 11:09:10.517608 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c83fada-ddb5-4acd-99c4-74d9f42e6250\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eece6b2154126c64202c6cb5a8b2953275ed2dc75e76fef6aaf2c4b82a1979f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28093276aebb4751d979649c4ced86f500308d0d4dde397771c0e1e968250ec8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28ae91e6197ea506c337abdbce14a048856e6bda9b35c5de922904c26bc96a54\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb7df64556e877b9dd56be5e97103abc8aa8b28a43b4a5389d0f6e2489057cf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc06f87c8ea0744810e2b9cb7ff8bb529fc1b2133ab79d12eb8e6129accd3e18\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"-12-28 11:08:43 +0000 UTC (now=2025-11-28 11:08:59.275700323 +0000 UTC))\\\\\\\"\\\\nI1128 
11:08:59.275749 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1128 11:08:59.275786 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1128 11:08:59.275797 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI1128 11:08:59.275809 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1128 11:08:59.275835 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1128 11:08:59.275852 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764328134\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764328133\\\\\\\\\\\\\\\" (2025-11-28 10:08:53 +0000 UTC to 2026-11-28 10:08:53 +0000 UTC (now=2025-11-28 11:08:59.275832266 +0000 UTC))\\\\\\\"\\\\nI1128 11:08:59.275869 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1128 11:08:59.275889 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1128 11:08:59.275902 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1128 11:08:59.275909 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1128 11:08:59.275921 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1128 11:08:59.275909 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2723273528/tls.crt::/tmp/serving-cert-2723273528/tls.key\\\\\\\"\\\\nF1128 11:08:59.278169 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:43Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c6f085f1fd5a1ed6abe0727d6a94c95fb1b97a9f00a0dc157f62f68698c25ba9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:10Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:10 crc kubenswrapper[4923]: I1128 11:09:10.531302 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c1e1dcf5efd54a3e3546460813ddc68dae027e669a19eeef6af7246b385ed21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:10Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:10 crc kubenswrapper[4923]: I1128 11:09:10.541593 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:10Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:10 crc kubenswrapper[4923]: I1128 11:09:10.552395 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:10Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:10 crc kubenswrapper[4923]: I1128 11:09:10.555866 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:10 crc kubenswrapper[4923]: I1128 11:09:10.555903 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:10 crc kubenswrapper[4923]: I1128 11:09:10.555913 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:10 crc kubenswrapper[4923]: I1128 11:09:10.555943 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:10 crc kubenswrapper[4923]: I1128 11:09:10.555955 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:10Z","lastTransitionTime":"2025-11-28T11:09:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:10 crc kubenswrapper[4923]: I1128 11:09:10.566520 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:10Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:10 crc kubenswrapper[4923]: I1128 11:09:10.578125 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"092566f7-fc7d-4897-a1f2-4ecedcd3058e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5e3ad6f76cbc3a3e771dc55c8711f153c18c1c96798a89e0f20b1ff06041129c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nthb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e0494fbf37786a6c8b1524ab2642c29343c3cfef308a6f0988d59f375d732a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nthb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-bwdth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:10Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:10 crc kubenswrapper[4923]: I1128 11:09:10.592858 4923 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-dns/node-resolver-766k2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69fcf39a-3416-4733-a55a-043d5286f8ac\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14683c7234bd497157ffe1097cd1eee097e5dd0a9e53a3e39813bc75890961b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dnr6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-766k2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:10Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:10 crc kubenswrapper[4923]: I1128 11:09:10.610273 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdcd87eab93f0216a48bbd6038ca2bc510b7b36f895bf66de15084be62a9a0e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa3a1d3e4297edce49cfd44925fbd1cb0d51752581df9a406042cc1da6f87121\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:10Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:10 crc kubenswrapper[4923]: I1128 11:09:10.626055 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0d288688a32f135820030d0816b0e9567100a4732e99c41c8b7f05374c8251f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:10Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:10 crc kubenswrapper[4923]: I1128 11:09:10.641605 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-9gjj9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b5d7899933378350cf0b863d44216aa3d87b7343f144dcab3470ee44370de0a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27143610133e2bc3e2aa453a394a9f65fcdeb97a45221a239dd490029e5a3184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://27143610133e2bc3e2aa453a394a9f65fcdeb97a45221a239dd490029e5a3184\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79f89c182f50622044f3978965cb214c601f6de4cddc96eaa118f532b2864276\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://79f89c182f50622044f3978965cb214c601f6de4cddc96eaa118f532b2864276\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7996a8b1d06ca35a2ee6c89edc2eaa7e45a6084ab54ff0caaa091c763d3cd47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b7996a8b1d06ca35a2ee6c89edc2eaa7e45a6084ab54ff0caaa091c763d3cd47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62d8385e1aa47815f9084d28d70dae899c80019ce59f5725455c594a31c97f22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62d8385e1aa47815f9084d28d70dae899c80019ce59f5725455c594a31c97f22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f6b2e1bc9f8f538d0973d9b1726d2c105d61fcd559df3ab8a2ec77b2d8f44a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f6b2e1bc9f8f538d0973d9b1726d2c105d61fcd559df3ab8a2ec77b2d8f44a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a368daf98912d176b66d5aba37e5e91937fbee8c7bd7ce6658993668c8e1525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9a368daf98912d176b66d5aba37e5e91937fbee8c7bd7ce6658993668c8e1525\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-9gjj9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:10Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:10 crc kubenswrapper[4923]: I1128 11:09:10.655047 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9qvkm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf32d1c9-4639-48a9-b972-c9ad6daec543\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee259c68571ed9e58d29ab09558dea3cdcc89ebfb898d6f27e896cb0d80665bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnwc6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:03Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9qvkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:10Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:10 crc kubenswrapper[4923]: I1128 11:09:10.661227 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:10 crc kubenswrapper[4923]: I1128 11:09:10.661263 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:10 crc kubenswrapper[4923]: I1128 11:09:10.661314 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:10 crc kubenswrapper[4923]: I1128 11:09:10.661337 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:10 crc kubenswrapper[4923]: I1128 11:09:10.661353 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:10Z","lastTransitionTime":"2025-11-28T11:09:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:10 crc kubenswrapper[4923]: I1128 11:09:10.764163 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:10 crc kubenswrapper[4923]: I1128 11:09:10.764215 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:10 crc kubenswrapper[4923]: I1128 11:09:10.764240 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:10 crc kubenswrapper[4923]: I1128 11:09:10.764272 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:10 crc kubenswrapper[4923]: I1128 11:09:10.764295 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:10Z","lastTransitionTime":"2025-11-28T11:09:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:10 crc kubenswrapper[4923]: I1128 11:09:10.867390 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:10 crc kubenswrapper[4923]: I1128 11:09:10.867437 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:10 crc kubenswrapper[4923]: I1128 11:09:10.867453 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:10 crc kubenswrapper[4923]: I1128 11:09:10.867476 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:10 crc kubenswrapper[4923]: I1128 11:09:10.867493 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:10Z","lastTransitionTime":"2025-11-28T11:09:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:10 crc kubenswrapper[4923]: I1128 11:09:10.974196 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:10 crc kubenswrapper[4923]: I1128 11:09:10.974253 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:10 crc kubenswrapper[4923]: I1128 11:09:10.974271 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:10 crc kubenswrapper[4923]: I1128 11:09:10.974299 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:10 crc kubenswrapper[4923]: I1128 11:09:10.974318 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:10Z","lastTransitionTime":"2025-11-28T11:09:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:11 crc kubenswrapper[4923]: I1128 11:09:11.077595 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:11 crc kubenswrapper[4923]: I1128 11:09:11.077645 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:11 crc kubenswrapper[4923]: I1128 11:09:11.077660 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:11 crc kubenswrapper[4923]: I1128 11:09:11.077681 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:11 crc kubenswrapper[4923]: I1128 11:09:11.077695 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:11Z","lastTransitionTime":"2025-11-28T11:09:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:11 crc kubenswrapper[4923]: I1128 11:09:11.168660 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 11:09:11 crc kubenswrapper[4923]: I1128 11:09:11.168676 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 11:09:11 crc kubenswrapper[4923]: I1128 11:09:11.168690 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 11:09:11 crc kubenswrapper[4923]: E1128 11:09:11.168981 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 11:09:11 crc kubenswrapper[4923]: E1128 11:09:11.169118 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 11:09:11 crc kubenswrapper[4923]: E1128 11:09:11.169323 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 11:09:11 crc kubenswrapper[4923]: I1128 11:09:11.180699 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:11 crc kubenswrapper[4923]: I1128 11:09:11.180983 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:11 crc kubenswrapper[4923]: I1128 11:09:11.181223 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:11 crc kubenswrapper[4923]: I1128 11:09:11.181422 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:11 crc kubenswrapper[4923]: I1128 11:09:11.181605 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:11Z","lastTransitionTime":"2025-11-28T11:09:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:11 crc kubenswrapper[4923]: I1128 11:09:11.186421 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"faf07f1a-1aa1-4e4a-b93d-739f0a9f1012\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f7b3757e1d1a5295909db644a475e35e9f9826cd7382a5a3eba86b4a76ac04d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f83e92b35264fccdd516d857e5a574a7156f7615b643691b6f8694daa38089b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8841f44f1d4af0e73960ce1c7ac5a4da352f85f6b3637315faa716d853be3277\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc960423fd7ee0a6231020982f5b932a6a2d7d0515d6f6df503d6c5d51b82096\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:11Z is after 2025-08-24T17:21:41Z"
Nov 28 11:09:11 crc kubenswrapper[4923]: I1128 11:09:11.203128 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-h5s2m" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"84374038-67ce-4dc0-a2c2-6eed9650c604\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://addcc8dd720a66b5089f7fa541a454de2be862cc524d1f8e4c948059ef70e20f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8z7ts\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-h5s2m\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:11Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:11 crc kubenswrapper[4923]: I1128 11:09:11.223996 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c83fada-ddb5-4acd-99c4-74d9f42e6250\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eece6b2154126c64202c6cb5a8b2953275ed2dc75e76fef6aaf2c4b82a1979f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28093276aebb4751d979649c4ced86f500308d0d4dde397771c0e1e968250ec8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28ae91e6197ea506c337abdbce14a048856e6bda9b35c5de922904c26bc96a54\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.i
o/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb7df64556e877b9dd56be5e97103abc8aa8b28a43b4a5389d0f6e2489057cf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc06f87c8ea0744810e2b9cb7ff8bb529fc1b2133ab79d12eb8e6129accd3e18\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"-12-28 11:08:43 +0000 UTC (now=2025-11-28 11:08:59.275700323 +0000 UTC))\\\\\\\"\\\\nI1128 11:08:59.275749 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1128 11:08:59.275786 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1128 11:08:59.275797 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI1128 11:08:59.275809 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1128 11:08:59.275835 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1128 11:08:59.275852 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764328134\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764328133\\\\\\\\\\\\\\\" (2025-11-28 10:08:53 +0000 UTC to 2026-11-28 10:08:53 +0000 UTC (now=2025-11-28 11:08:59.275832266 +0000 UTC))\\\\\\\"\\\\nI1128 11:08:59.275869 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1128 11:08:59.275889 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1128 11:08:59.275902 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1128 11:08:59.275909 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1128 11:08:59.275921 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1128 11:08:59.275909 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2723273528/tls.crt::/tmp/serving-cert-2723273528/tls.key\\\\\\\"\\\\nF1128 11:08:59.278169 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:43Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c6f085f1fd5a1ed6abe0727d6a94c95fb1b97a9f00a0dc157f62f68698c25ba9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:11Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:11 crc kubenswrapper[4923]: I1128 11:09:11.252917 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c1e1dcf5efd54a3e3546460813ddc68dae027e669a19eeef6af7246b385ed21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:11Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:11 crc kubenswrapper[4923]: I1128 11:09:11.271095 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:11Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:11 crc kubenswrapper[4923]: I1128 11:09:11.284251 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:11 crc kubenswrapper[4923]: I1128 11:09:11.284500 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:11 crc kubenswrapper[4923]: I1128 11:09:11.284618 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:11 crc kubenswrapper[4923]: I1128 11:09:11.284712 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:11 crc kubenswrapper[4923]: I1128 11:09:11.284825 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:11Z","lastTransitionTime":"2025-11-28T11:09:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:11 crc kubenswrapper[4923]: I1128 11:09:11.293249 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:11Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:11 crc kubenswrapper[4923]: I1128 11:09:11.318050 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:11Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:11 crc kubenswrapper[4923]: I1128 11:09:11.340706 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"092566f7-fc7d-4897-a1f2-4ecedcd3058e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5e3ad6f76cbc3a3e771dc55c8711f153c18c1c96798a89e0f20b1ff06041129c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nthb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":
\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e0494fbf37786a6c8b1524ab2642c29343c3cfef308a6f0988d59f375d732a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nthb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-bwdth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:11Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:11 crc kubenswrapper[4923]: I1128 11:09:11.365245 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ee3c047cb59b98c8394618e6194fc477b983a7039581951378c69698b307ee7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3c01dc5b138b3d245898dd4a01c5e81350afe6fabfe9e0333589cd9439d4017\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88bb4ac52c4706ca3d80080efb31eff071b89651d1a474b4c0c11ed5559ee7a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7b206747c810fe48a3d4269cdf80dce693f2d075510aabb42ef2c6dbbea97e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7489bfb225a27d96b70124820fb1924580c08b3355ef948335f881d7646a8a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bc7c6e0b076f04ba7810c82578147a9a3af59d3393e8effb111c299583aa6de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18927b205b4749967b7844a91e7f60621e025df7
65f43689d07e9d95e0758f35\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18927b205b4749967b7844a91e7f60621e025df765f43689d07e9d95e0758f35\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T11:09:09Z\\\",\\\"message\\\":\\\".574516 6144 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1128 11:09:09.575176 6144 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1128 11:09:09.574471 6144 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 11:09:09.574693 6144 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1128 11:09:09.574655 6144 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 11:09:09.577244 6144 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1128 11:09:09.577365 6144 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1128 11:09:09.577433 6144 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1128 11:09:09.577518 6144 factory.go:656] Stopping watch factory\\\\nI1128 11:09:09.577576 6144 ovnkube.go:599] Stopped ovnkube\\\\nI1128 11:09:09.577652 6144 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1128 11:09:09.577709 6144 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1128 
11:09:0\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c2e3f2c83ec1b586a9478fb8d23caccab36a0fe08a3f0907a7b0cb2e67af65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0
d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-68dth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:11Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:11 crc kubenswrapper[4923]: I1128 11:09:11.417417 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:11 crc kubenswrapper[4923]: I1128 11:09:11.417462 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:11 crc kubenswrapper[4923]: I1128 11:09:11.417482 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:11 crc kubenswrapper[4923]: I1128 11:09:11.417513 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:11 crc kubenswrapper[4923]: I1128 11:09:11.417525 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:11Z","lastTransitionTime":"2025-11-28T11:09:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:11 crc kubenswrapper[4923]: I1128 11:09:11.424982 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-68dth_08e03349-56fc-4b2d-93d3-cf2405a4b7ad/ovnkube-controller/0.log" Nov 28 11:09:11 crc kubenswrapper[4923]: I1128 11:09:11.431085 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" event={"ID":"08e03349-56fc-4b2d-93d3-cf2405a4b7ad","Type":"ContainerStarted","Data":"bbada82e4374cddba43b2570877b1a338c03bcc8b3691cb2cfce9c5e59d8f271"} Nov 28 11:09:11 crc kubenswrapper[4923]: I1128 11:09:11.431213 4923 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 28 11:09:11 crc kubenswrapper[4923]: I1128 11:09:11.439957 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-766k2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69fcf39a-3416-4733-a55a-043d5286f8ac\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14683c7234bd497157ffe1097cd1eee097e5dd0a9e53a3e39813bc75890961b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dnr6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-766k2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:11Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:11 crc kubenswrapper[4923]: I1128 11:09:11.454218 4923 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdcd87eab93f0216a48bbd6038ca2bc510b7b36f895bf66de15084be62a9a0e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa3a1d3e4297edce49cfd44925fbd1cb0d51752581df9a406042cc1da6f87121\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:11Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:11 crc kubenswrapper[4923]: I1128 11:09:11.467409 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0d288688a32f135820030d0816b0e9567100a4732e99c41c8b7f05374c8251f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:11Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:11 crc kubenswrapper[4923]: I1128 11:09:11.488684 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-9gjj9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b5d7899933378350cf0b863d44216aa3d87b7343f144dcab3470ee44370de0a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27143610133e2bc3e2aa453a394a9f65fcdeb97a45221a239dd490029e5a3184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://27143610133e2bc3e2aa453a394a9f65fcdeb97a45221a239dd490029e5a3184\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79f89c182f50622044f3978965cb214c601f6de4cddc96eaa118f532b2864276\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://79f89c182f50622044f3978965cb214c601f6de4cddc96eaa118f532b2864276\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7996a8b1d06ca35a2ee6c89edc2eaa7e45a6084ab54ff0caaa091c763d3cd47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b7996a8b1d06ca35a2ee6c89edc2eaa7e45a6084ab54ff0caaa091c763d3cd47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62d8385e1aa47815f9084d28d70dae899c80019ce59f5725455c594a31c97f22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62d8385e1aa47815f9084d28d70dae899c80019ce59f5725455c594a31c97f22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f6b2e1bc9f8f538d0973d9b1726d2c105d61fcd559df3ab8a2ec77b2d8f44a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f6b2e1bc9f8f538d0973d9b1726d2c105d61fcd559df3ab8a2ec77b2d8f44a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a368daf98912d176b66d5aba37e5e91937fbee8c7bd7ce6658993668c8e1525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9a368daf98912d176b66d5aba37e5e91937fbee8c7bd7ce6658993668c8e1525\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-9gjj9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:11Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:11 crc kubenswrapper[4923]: I1128 11:09:11.497148 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9qvkm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf32d1c9-4639-48a9-b972-c9ad6daec543\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee259c68571ed9e58d29ab09558dea3cdcc89ebfb898d6f27e896cb0d80665bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnwc6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:03Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9qvkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:11Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:11 crc kubenswrapper[4923]: I1128 11:09:11.508539 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"faf07f1a-1aa1-4e4a-b93d-739f0a9f1012\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f7b3757e1d1a5295909db644a475e35e9f9826cd7382a5a3eba86b4a76ac04d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f83e92b35264fccdd516d857e5a574a7156f7615b643691b6f8694daa38089b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8841f44f1d4af0e73960ce1c7ac5a4da352f85f6b3637315faa716d853be3277\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc960423fd7ee0a6231020982f5b932a6a2d7d0515d6f6df503d6c5d51b82096\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:11Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:11 crc kubenswrapper[4923]: I1128 11:09:11.519784 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:11 crc kubenswrapper[4923]: I1128 11:09:11.519815 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:11 crc kubenswrapper[4923]: I1128 11:09:11.519840 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:11 crc kubenswrapper[4923]: I1128 11:09:11.519855 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:11 crc kubenswrapper[4923]: I1128 11:09:11.519864 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:11Z","lastTransitionTime":"2025-11-28T11:09:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:11 crc kubenswrapper[4923]: I1128 11:09:11.521162 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-h5s2m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"84374038-67ce-4dc0-a2c2-6eed9650c604\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://addcc8dd720a66b5089f7fa541a454de2be862cc524d1f8e4c948059ef70e20f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8z7ts\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-h5s2m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:11Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:11 crc kubenswrapper[4923]: I1128 11:09:11.534677 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c83fada-ddb5-4acd-99c4-74d9f42e6250\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eece6b2154126c64202c6cb5a8b2953275ed2dc75e76fef6aaf2c4b82a1979f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28093276aebb4751d979649c4ced86f500308d0d4dde397771c0e1e968250ec8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28ae91e6197ea506c337abdbce14a048856e6bda9b3
5c5de922904c26bc96a54\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb7df64556e877b9dd56be5e97103abc8aa8b28a43b4a5389d0f6e2489057cf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc06f87c8ea0744810e2b9cb7ff8bb529fc1b2133ab79d12eb8e6129accd3e18\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"-12-28 11:08:43 +0000 UTC (now=2025-11-28 11:08:59.275700323 +0000 UTC))\\\\\\\"\\\\nI1128 11:08:59.275749 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1128 11:08:59.275786 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1128 11:08:59.275797 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI1128 11:08:59.275809 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1128 11:08:59.275835 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1128 11:08:59.275852 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764328134\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764328133\\\\\\\\\\\\\\\" (2025-11-28 10:08:53 +0000 UTC to 2026-11-28 10:08:53 +0000 UTC (now=2025-11-28 11:08:59.275832266 +0000 UTC))\\\\\\\"\\\\nI1128 11:08:59.275869 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1128 11:08:59.275889 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1128 11:08:59.275902 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1128 11:08:59.275909 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1128 11:08:59.275921 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1128 11:08:59.275909 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" 
name=\\\\\\\"serving-cert::/tmp/serving-cert-2723273528/tls.crt::/tmp/serving-cert-2723273528/tls.key\\\\\\\"\\\\nF1128 11:08:59.278169 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:43Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c6f085f1fd5a1ed6abe0727d6a94c95fb1b97a9f00a0dc157f62f68698c25ba9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:11Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:11 crc kubenswrapper[4923]: I1128 11:09:11.545581 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c1e1dcf5efd54a3e3546460813ddc68dae027e669a19eeef6af7246b385ed21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:11Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:11 crc kubenswrapper[4923]: I1128 11:09:11.559757 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:11Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:11 crc kubenswrapper[4923]: I1128 11:09:11.574359 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:11Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:11 crc kubenswrapper[4923]: I1128 11:09:11.588904 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:11Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:11 crc kubenswrapper[4923]: I1128 11:09:11.604852 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"092566f7-fc7d-4897-a1f2-4ecedcd3058e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5e3ad6f76cbc3a3e771dc55c8711f153c18c1c96798a89e0f20b1ff06041129c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nthb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e0494fbf37786a6c8b1524ab2642c29343c3cfef308a6f0988d59f375d732a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\
\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nthb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-bwdth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:11Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:11 crc kubenswrapper[4923]: I1128 11:09:11.621719 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:11 crc kubenswrapper[4923]: I1128 11:09:11.621967 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:11 crc kubenswrapper[4923]: I1128 11:09:11.621981 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:11 crc kubenswrapper[4923]: I1128 11:09:11.621997 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:11 crc kubenswrapper[4923]: I1128 11:09:11.622009 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:11Z","lastTransitionTime":"2025-11-28T11:09:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:11 crc kubenswrapper[4923]: I1128 11:09:11.629606 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ee3c047cb59b98c8394618e6194fc477b983a7039581951378c69698b307ee7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3c01dc5b138b3d245898dd4a01c5e81350afe6fabfe9e0333589cd9439d4017\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://88bb4ac52c4706ca3d80080efb31eff071b89651d1a474b4c0c11ed5559ee7a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7b206747c810fe48a3d4269cdf80dce693f2d075510aabb42ef2c6dbbea97e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7489bfb225a27d96b70124820fb1924580c08b3355ef948335f881d7646a8a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bc7c6e0b076f04ba7810c82578147a9a3af59d3393e8effb111c299583aa6de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bbada82e4374cddba43b2570877b1a338c03bcc8b3691cb2cfce9c5e59d8f271\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18927b205b4749967b7844a91e7f60621e025df765f43689d07e9d95e0758f35\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T11:09:09Z\\\",\\\"message\\\":\\\".574516 6144 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1128 11:09:09.575176 6144 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1128 11:09:09.574471 6144 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 11:09:09.574693 6144 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1128 11:09:09.574655 6144 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 11:09:09.577244 6144 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1128 11:09:09.577365 6144 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1128 11:09:09.577433 6144 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1128 11:09:09.577518 6144 factory.go:656] Stopping watch factory\\\\nI1128 11:09:09.577576 6144 ovnkube.go:599] Stopped ovnkube\\\\nI1128 11:09:09.577652 6144 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1128 11:09:09.577709 6144 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1128 
11:09:0\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:07Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c2e3f2c83ec1b586a9478fb8d23caccab36a0fe08a3f0907a7b0cb2e67af65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\
\"containerID\\\":\\\"cri-o://18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-68dth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:11Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:11 crc kubenswrapper[4923]: I1128 11:09:11.644693 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-766k2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"69fcf39a-3416-4733-a55a-043d5286f8ac\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14683c7234bd497157ffe1097cd1eee097e5dd0a9e53a3e39813bc75890961b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dnr6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-766k2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:11Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:11 crc kubenswrapper[4923]: I1128 11:09:11.659290 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdcd87eab93f0216a48bbd6038ca2bc510b7b36f895bf66de15084be62a9a0e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa3a1d3e4297edce49cfd44925fbd1cb0d51752581df9a406042cc1da6f87121\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:11Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:11 crc kubenswrapper[4923]: I1128 11:09:11.669564 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0d288688a32f135820030d0816b0e9567100a4732e99c41c8b7f05374c8251f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:11Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:11 crc kubenswrapper[4923]: I1128 11:09:11.685570 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-9gjj9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b5d7899933378350cf0b863d44216aa3d87b7343f144dcab3470ee44370de0a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27143610133e2bc3e2aa453a394a9f65fcdeb97a45221a239dd490029e5a3184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://27143610133e2bc3e2aa453a394a9f65fcdeb97a45221a239dd490029e5a3184\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79f89c182f50622044f3978965cb214c601f6de4cddc96eaa118f532b2864276\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://79f89c182f50622044f3978965cb214c601f6de4cddc96eaa118f532b2864276\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7996a8b1d06ca35a2ee6c89edc2eaa7e45a6084ab54ff0caaa091c763d3cd47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b7996a8b1d06ca35a2ee6c89edc2eaa7e45a6084ab54ff0caaa091c763d3cd47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62d8385e1aa47815f9084d28d70dae899c80019ce59f5725455c594a31c97f22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62d8385e1aa47815f9084d28d70dae899c80019ce59f5725455c594a31c97f22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f6b2e1bc9f8f538d0973d9b1726d2c105d61fcd559df3ab8a2ec77b2d8f44a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f6b2e1bc9f8f538d0973d9b1726d2c105d61fcd559df3ab8a2ec77b2d8f44a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a368daf98912d176b66d5aba37e5e91937fbee8c7bd7ce6658993668c8e1525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9a368daf98912d176b66d5aba37e5e91937fbee8c7bd7ce6658993668c8e1525\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-9gjj9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:11Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:11 crc kubenswrapper[4923]: I1128 11:09:11.697219 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9qvkm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf32d1c9-4639-48a9-b972-c9ad6daec543\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee259c68571ed9e58d29ab09558dea3cdcc89ebfb898d6f27e896cb0d80665bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnwc6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:03Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9qvkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:11Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:11 crc kubenswrapper[4923]: I1128 11:09:11.724451 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:11 crc kubenswrapper[4923]: I1128 11:09:11.724492 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:11 crc kubenswrapper[4923]: I1128 11:09:11.724503 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:11 crc kubenswrapper[4923]: I1128 11:09:11.724520 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:11 crc kubenswrapper[4923]: I1128 11:09:11.724532 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:11Z","lastTransitionTime":"2025-11-28T11:09:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:11 crc kubenswrapper[4923]: I1128 11:09:11.826742 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:11 crc kubenswrapper[4923]: I1128 11:09:11.826811 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:11 crc kubenswrapper[4923]: I1128 11:09:11.826831 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:11 crc kubenswrapper[4923]: I1128 11:09:11.826855 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:11 crc kubenswrapper[4923]: I1128 11:09:11.826873 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:11Z","lastTransitionTime":"2025-11-28T11:09:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:11 crc kubenswrapper[4923]: I1128 11:09:11.930143 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:11 crc kubenswrapper[4923]: I1128 11:09:11.930214 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:11 crc kubenswrapper[4923]: I1128 11:09:11.930235 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:11 crc kubenswrapper[4923]: I1128 11:09:11.930262 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:11 crc kubenswrapper[4923]: I1128 11:09:11.930282 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:11Z","lastTransitionTime":"2025-11-28T11:09:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:12 crc kubenswrapper[4923]: I1128 11:09:12.033319 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:12 crc kubenswrapper[4923]: I1128 11:09:12.033375 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:12 crc kubenswrapper[4923]: I1128 11:09:12.033388 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:12 crc kubenswrapper[4923]: I1128 11:09:12.033408 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:12 crc kubenswrapper[4923]: I1128 11:09:12.033422 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:12Z","lastTransitionTime":"2025-11-28T11:09:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:12 crc kubenswrapper[4923]: I1128 11:09:12.056262 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 11:09:12 crc kubenswrapper[4923]: I1128 11:09:12.076369 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-766k2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69fcf39a-3416-4733-a55a-043d5286f8ac\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14683c7234bd497157ffe1097cd1eee097e5dd0a9e53a3e39813bc75890961b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dnr6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\"
,\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-766k2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:12Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:12 crc kubenswrapper[4923]: I1128 11:09:12.097528 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdcd87eab93f0216a48bbd6038ca2bc510b7b36f895bf66de15084be62a9a0e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa3a1d3e4297edce49cfd44925fbd1cb0d51752581df9a406042cc1da6f87121\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:12Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:12 crc kubenswrapper[4923]: I1128 11:09:12.115325 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0d288688a32f135820030d0816b0e9567100a4732e99c41c8b7f05374c8251f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:12Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:12 crc kubenswrapper[4923]: I1128 11:09:12.137071 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:12 crc kubenswrapper[4923]: I1128 11:09:12.137131 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:12 crc kubenswrapper[4923]: I1128 11:09:12.137147 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:12 crc kubenswrapper[4923]: I1128 11:09:12.137172 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:12 crc kubenswrapper[4923]: I1128 11:09:12.137190 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:12Z","lastTransitionTime":"2025-11-28T11:09:12Z","reason":"KubeletNotReady","message":"container runtime 
network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:12 crc kubenswrapper[4923]: I1128 11:09:12.141706 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-9gjj9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b5d7899933378350cf0b863d44216aa3d87b7343f144dcab3470ee44370de0a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27143610133e2bc3e2aa453a394a9f65fcdeb97a45221a239dd490029e5a3184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://27143610133e2bc3e2aa453a394a9f65fcdeb97a45221a239dd490029e5a3184\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"
readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79f89c182f50622044f3978965cb214c601f6de4cddc96eaa118f532b2864276\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://79f89c182f50622044f3978965cb214c601f6de4cddc96eaa118f532b2864276\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7996a8b1d06ca35a2ee6c89edc2eaa7e45a6084ab54ff0caaa091c763d3cd47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b7996a8b1d06ca35a2ee6c89edc2eaa7e45a6084ab54ff0caaa091c763d3cd47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62d8385e1aa47815f9084d28d70dae899c80019ce59f5725455c594a31c97f22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62d8385e1aa47815f9084d28d70dae899c80019ce59f5725455c594a31c97f22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:03Z\\\",\\\"reason\\\":\\\"Compl
eted\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f6b2e1bc9f8f538d0973d9b1726d2c105d61fcd559df3ab8a2ec77b2d8f44a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f6b2e1bc9f8f538d0973d9b1726d2c105d61fcd559df3ab8a2ec77b2d8f44a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a368daf98912d176b66d5aba37e5e91937fbee8c7bd7ce6658993668c8e1525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9a368daf98912d176b66d5aba37e5e91937fbee8c7bd7ce6658993668c8e1525\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-9gjj9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:12Z is 
after 2025-08-24T17:21:41Z" Nov 28 11:09:12 crc kubenswrapper[4923]: I1128 11:09:12.155973 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9qvkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf32d1c9-4639-48a9-b972-c9ad6daec543\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee259c68571ed9e58d29ab09558dea3cdcc89ebfb898d6f27e896cb0d80665bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnwc6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:03Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9qvkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:12Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:12 crc kubenswrapper[4923]: I1128 11:09:12.175042 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-h5s2m" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"84374038-67ce-4dc0-a2c2-6eed9650c604\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://addcc8dd720a66b5089f7fa541a454de2be862cc524d1f8e4c948059ef70e20f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8z7ts\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-h5s2m\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:12Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:12 crc kubenswrapper[4923]: I1128 11:09:12.192980 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"faf07f1a-1aa1-4e4a-b93d-739f0a9f1012\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f7b3757e1d1a5295909db644a475e35e9f9826cd7382a5a3eba86b4a76ac04d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f83e92b35264fccdd516d857e5a574a7156f7615b643691b6f8694daa38089b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8841f44f1d4af0e73960ce1c7ac5a4da352f85f6b3637315faa716d853be3277\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"st
arted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc960423fd7ee0a6231020982f5b932a6a2d7d0515d6f6df503d6c5d51b82096\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:12Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:12 crc kubenswrapper[4923]: I1128 11:09:12.211162 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:12Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:12 crc kubenswrapper[4923]: I1128 11:09:12.228111 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:12Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:12 crc kubenswrapper[4923]: I1128 11:09:12.240241 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:12 crc kubenswrapper[4923]: I1128 11:09:12.240291 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:12 crc kubenswrapper[4923]: I1128 11:09:12.240307 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:12 crc kubenswrapper[4923]: I1128 11:09:12.240327 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:12 crc kubenswrapper[4923]: I1128 11:09:12.240340 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:12Z","lastTransitionTime":"2025-11-28T11:09:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:12 crc kubenswrapper[4923]: I1128 11:09:12.248394 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:12Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:12 crc kubenswrapper[4923]: I1128 11:09:12.266481 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"092566f7-fc7d-4897-a1f2-4ecedcd3058e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5e3ad6f76cbc3a3e771dc55c8711f153c18c1c96798a89e0f20b1ff06041129c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nthb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e0494fbf37786a6c8b1524ab2642c29343c3cfef308a6f0988d59f375d732a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nthb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-bwdth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:12Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:12 crc kubenswrapper[4923]: I1128 11:09:12.301721 4923 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ee3c047cb59b98c8394618e6194fc477b983a7039581951378c69698b307ee7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3c01dc5b138b3d245898dd4a01c5e81350afe6fabfe9e0333589cd9439d4017\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88bb4ac52c4706ca3d80080efb31eff071b89651d1a474b4c0c11ed5559ee7a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7b206747c810fe48a3d4269cdf80dce693f2d075510aabb42ef2c6dbbea97e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7489bfb225a27d96b70124820fb1924580c08b3355ef948335f881d7646a8a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bc7c6e0b076f04ba7810c82578147a9a3af59d3393e8effb111c299583aa6de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bbada82e4374cddba43b2570877b1a338c03bcc8b3691cb2cfce9c5e59d8f271\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18927b205b4749967b7844a91e7f60621e025df765f43689d07e9d95e0758f35\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T11:09:09Z\\\",\\\"message\\\":\\\".574516 6144 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1128 11:09:09.575176 6144 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1128 11:09:09.574471 6144 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 11:09:09.574693 6144 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1128 11:09:09.574655 6144 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 11:09:09.577244 6144 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1128 11:09:09.577365 6144 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1128 11:09:09.577433 6144 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1128 11:09:09.577518 6144 factory.go:656] Stopping watch factory\\\\nI1128 11:09:09.577576 6144 ovnkube.go:599] Stopped ovnkube\\\\nI1128 11:09:09.577652 6144 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1128 11:09:09.577709 6144 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1128 
11:09:0\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:07Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c2e3f2c83ec1b586a9478fb8d23caccab36a0fe08a3f0907a7b0cb2e67af65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\
\"containerID\\\":\\\"cri-o://18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-68dth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:12Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:12 crc kubenswrapper[4923]: I1128 11:09:12.325273 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c83fada-ddb5-4acd-99c4-74d9f42e6250\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eece6b2154126c64202c6cb5a8b2953275ed2dc75e76fef6aaf2c4b82a1979f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28093276aebb4751d979649c4ced86f500308d0d4dde397771c0e1e968250ec8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28ae91e6197ea506c337abdbce14a048856e6bda9b35c5de922904c26bc96a54\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb7df64556e877b9dd56be5e97103abc8aa8b28a43b4a5389d0f6e2489057cf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc06f87c8ea0744810e2b9cb7ff8bb529fc1b2133ab79d12eb8e6129accd3e18\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"-12-28 11:08:43 +0000 UTC (now=2025-11-28 11:08:59.275700323 +0000 UTC))\\\\\\\"\\\\nI1128 11:08:59.275749 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1128 11:08:59.275786 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1128 11:08:59.275797 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI1128 11:08:59.275809 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1128 11:08:59.275835 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1128 11:08:59.275852 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764328134\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764328133\\\\\\\\\\\\\\\" (2025-11-28 10:08:53 +0000 UTC to 2026-11-28 10:08:53 +0000 UTC (now=2025-11-28 11:08:59.275832266 +0000 UTC))\\\\\\\"\\\\nI1128 11:08:59.275869 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1128 11:08:59.275889 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1128 11:08:59.275902 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1128 11:08:59.275909 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1128 11:08:59.275921 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1128 11:08:59.275909 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2723273528/tls.crt::/tmp/serving-cert-2723273528/tls.key\\\\\\\"\\\\nF1128 11:08:59.278169 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:43Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c6f085f1fd5a1ed6abe0727d6a94c95fb1b97a9f00a0dc157f62f68698c25ba9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:12Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:12 crc kubenswrapper[4923]: I1128 11:09:12.342909 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:12 crc kubenswrapper[4923]: I1128 11:09:12.342980 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:12 crc kubenswrapper[4923]: I1128 11:09:12.342998 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:12 crc kubenswrapper[4923]: I1128 11:09:12.343020 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:12 crc kubenswrapper[4923]: I1128 11:09:12.343039 4923 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:12Z","lastTransitionTime":"2025-11-28T11:09:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:12 crc kubenswrapper[4923]: I1128 11:09:12.346849 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c1e1dcf5efd54a3e3546460813ddc68dae027e669a19eeef6af7246b385ed21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:12Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:12 crc kubenswrapper[4923]: I1128 11:09:12.435263 4923 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 28 11:09:12 crc kubenswrapper[4923]: I1128 11:09:12.445977 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:12 crc kubenswrapper[4923]: I1128 11:09:12.446040 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:12 crc kubenswrapper[4923]: I1128 11:09:12.446061 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:12 crc kubenswrapper[4923]: I1128 11:09:12.446088 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:12 
crc kubenswrapper[4923]: I1128 11:09:12.446105 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:12Z","lastTransitionTime":"2025-11-28T11:09:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:12 crc kubenswrapper[4923]: I1128 11:09:12.549264 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:12 crc kubenswrapper[4923]: I1128 11:09:12.549333 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:12 crc kubenswrapper[4923]: I1128 11:09:12.549354 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:12 crc kubenswrapper[4923]: I1128 11:09:12.549381 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:12 crc kubenswrapper[4923]: I1128 11:09:12.549403 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:12Z","lastTransitionTime":"2025-11-28T11:09:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:12 crc kubenswrapper[4923]: I1128 11:09:12.652413 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:12 crc kubenswrapper[4923]: I1128 11:09:12.652496 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:12 crc kubenswrapper[4923]: I1128 11:09:12.652555 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:12 crc kubenswrapper[4923]: I1128 11:09:12.652589 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:12 crc kubenswrapper[4923]: I1128 11:09:12.652614 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:12Z","lastTransitionTime":"2025-11-28T11:09:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:12 crc kubenswrapper[4923]: I1128 11:09:12.755504 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:12 crc kubenswrapper[4923]: I1128 11:09:12.755562 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:12 crc kubenswrapper[4923]: I1128 11:09:12.755582 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:12 crc kubenswrapper[4923]: I1128 11:09:12.755608 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:12 crc kubenswrapper[4923]: I1128 11:09:12.755624 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:12Z","lastTransitionTime":"2025-11-28T11:09:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:12 crc kubenswrapper[4923]: I1128 11:09:12.859286 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:12 crc kubenswrapper[4923]: I1128 11:09:12.859344 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:12 crc kubenswrapper[4923]: I1128 11:09:12.859361 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:12 crc kubenswrapper[4923]: I1128 11:09:12.859383 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:12 crc kubenswrapper[4923]: I1128 11:09:12.859401 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:12Z","lastTransitionTime":"2025-11-28T11:09:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:12 crc kubenswrapper[4923]: I1128 11:09:12.962027 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:12 crc kubenswrapper[4923]: I1128 11:09:12.962111 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:12 crc kubenswrapper[4923]: I1128 11:09:12.962135 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:12 crc kubenswrapper[4923]: I1128 11:09:12.962168 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:12 crc kubenswrapper[4923]: I1128 11:09:12.962191 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:12Z","lastTransitionTime":"2025-11-28T11:09:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.065905 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.066009 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.066029 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.066055 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.066074 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:13Z","lastTransitionTime":"2025-11-28T11:09:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.168046 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.168080 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.168149 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 11:09:13 crc kubenswrapper[4923]: E1128 11:09:13.168248 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 11:09:13 crc kubenswrapper[4923]: E1128 11:09:13.168381 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 11:09:13 crc kubenswrapper[4923]: E1128 11:09:13.168640 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.171376 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.171412 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.171423 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.171436 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.171451 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:13Z","lastTransitionTime":"2025-11-28T11:09:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.262234 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-8klhg"] Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.262859 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-8klhg" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.266979 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.267271 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.275025 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.275082 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.275099 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.275126 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.275143 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:13Z","lastTransitionTime":"2025-11-28T11:09:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.275093 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/b1f111d9-e2b2-44b9-9592-bc5d4fef01f0-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-8klhg\" (UID: \"b1f111d9-e2b2-44b9-9592-bc5d4fef01f0\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-8klhg" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.275390 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vq594\" (UniqueName: \"kubernetes.io/projected/b1f111d9-e2b2-44b9-9592-bc5d4fef01f0-kube-api-access-vq594\") pod \"ovnkube-control-plane-749d76644c-8klhg\" (UID: \"b1f111d9-e2b2-44b9-9592-bc5d4fef01f0\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-8klhg" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.275584 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/b1f111d9-e2b2-44b9-9592-bc5d4fef01f0-env-overrides\") pod \"ovnkube-control-plane-749d76644c-8klhg\" (UID: \"b1f111d9-e2b2-44b9-9592-bc5d4fef01f0\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-8klhg" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.275695 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/b1f111d9-e2b2-44b9-9592-bc5d4fef01f0-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-8klhg\" (UID: \"b1f111d9-e2b2-44b9-9592-bc5d4fef01f0\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-8klhg" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.289989 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdcd87eab93f0216a48bbd6038ca2bc510b7b36f895bf66de15084be62a9a0e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa3a1d3e4297edce49cfd44925fbd1cb0d51752581df9a406042cc1da6f87121\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:13Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.309891 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0d288688a32f135820030d0816b0e9567100a4732e99c41c8b7f05374c8251f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:13Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.334441 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-9gjj9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b5d7899933378350cf0b863d44216aa3d87b7343f144dcab3470ee44370de0a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27143610133e2bc3e2aa453a394a9f65fcdeb97a45221a239dd490029e5a3184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://27143610133e2bc3e2aa453a394a9f65fcdeb97a45221a239dd490029e5a3184\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79f89c182f50622044f3978965cb214c601f6de4cddc96eaa118f532b2864276\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://79f89c182f50622044f3978965cb214c601f6de4cddc96eaa118f532b2864276\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7996a8b1d06ca35a2ee6c89edc2eaa7e45a6084ab54ff0caaa091c763d3cd47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b7996a8b1d06ca35a2ee6c89edc2eaa7e45a6084ab54ff0caaa091c763d3cd47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62d8385e1aa47815f9084d28d70dae899c80019ce59f5725455c594a31c97f22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62d8385e1aa47815f9084d28d70dae899c80019ce59f5725455c594a31c97f22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f6b2e1bc9f8f538d0973d9b1726d2c105d61fcd559df3ab8a2ec77b2d8f44a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f6b2e1bc9f8f538d0973d9b1726d2c105d61fcd559df3ab8a2ec77b2d8f44a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a368daf98912d176b66d5aba37e5e91937fbee8c7bd7ce6658993668c8e1525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9a368daf98912d176b66d5aba37e5e91937fbee8c7bd7ce6658993668c8e1525\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-9gjj9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:13Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.351335 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9qvkm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf32d1c9-4639-48a9-b972-c9ad6daec543\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee259c68571ed9e58d29ab09558dea3cdcc89ebfb898d6f27e896cb0d80665bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnwc6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:03Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9qvkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:13Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.371014 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"faf07f1a-1aa1-4e4a-b93d-739f0a9f1012\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f7b3757e1d1a5295909db644a475e35e9f9826cd7382a5a3eba86b4a76ac04d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f83e92b35264fccdd516d857e5a574a7156f7615b643691b6f8694daa38089b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8841f44f1d4af0e73960ce1c7ac5a4da352f85f6b3637315faa716d853be3277\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc960423fd7ee0a6231020982f5b932a6a2d7d0515d6f6df503d6c5d51b82096\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:13Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.376174 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/b1f111d9-e2b2-44b9-9592-bc5d4fef01f0-env-overrides\") pod \"ovnkube-control-plane-749d76644c-8klhg\" (UID: \"b1f111d9-e2b2-44b9-9592-bc5d4fef01f0\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-8klhg" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.376309 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/b1f111d9-e2b2-44b9-9592-bc5d4fef01f0-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-8klhg\" (UID: \"b1f111d9-e2b2-44b9-9592-bc5d4fef01f0\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-8klhg" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.376358 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/b1f111d9-e2b2-44b9-9592-bc5d4fef01f0-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-8klhg\" (UID: \"b1f111d9-e2b2-44b9-9592-bc5d4fef01f0\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-8klhg" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.376443 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vq594\" (UniqueName: \"kubernetes.io/projected/b1f111d9-e2b2-44b9-9592-bc5d4fef01f0-kube-api-access-vq594\") pod \"ovnkube-control-plane-749d76644c-8klhg\" (UID: \"b1f111d9-e2b2-44b9-9592-bc5d4fef01f0\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-8klhg" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.377416 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/b1f111d9-e2b2-44b9-9592-bc5d4fef01f0-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-8klhg\" (UID: \"b1f111d9-e2b2-44b9-9592-bc5d4fef01f0\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-8klhg" Nov 28 11:09:13 crc kubenswrapper[4923]: 
I1128 11:09:13.378398 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/b1f111d9-e2b2-44b9-9592-bc5d4fef01f0-env-overrides\") pod \"ovnkube-control-plane-749d76644c-8klhg\" (UID: \"b1f111d9-e2b2-44b9-9592-bc5d4fef01f0\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-8klhg" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.379555 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.379593 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.379603 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.379620 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.379631 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:13Z","lastTransitionTime":"2025-11-28T11:09:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.385393 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/b1f111d9-e2b2-44b9-9592-bc5d4fef01f0-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-8klhg\" (UID: \"b1f111d9-e2b2-44b9-9592-bc5d4fef01f0\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-8klhg" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.397073 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-h5s2m" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"84374038-67ce-4dc0-a2c2-6eed9650c604\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://addcc8dd720a66b5089f7fa541a454de2be862cc524d1f8e4c948059ef70e20f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8z7ts\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-h5s2m\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:13Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.407324 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vq594\" (UniqueName: \"kubernetes.io/projected/b1f111d9-e2b2-44b9-9592-bc5d4fef01f0-kube-api-access-vq594\") pod \"ovnkube-control-plane-749d76644c-8klhg\" (UID: \"b1f111d9-e2b2-44b9-9592-bc5d4fef01f0\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-8klhg" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.411409 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-8klhg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b1f111d9-e2b2-44b9-9592-bc5d4fef01f0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:13Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:13Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vq594\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vq594\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:13Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-8klhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:13Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.426435 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c83fada-ddb5-4acd-99c4-74d9f42e6250\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eece6b2154126c64202c6cb5a8b2953275ed2dc75e76fef6aaf2c4b82a1979f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28093276aebb4751d979649c4ced86f500308d0d4dde397771c0e1e968250ec8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28ae91e6197ea506c337abdbce14a048856e6bda9b35c5de922904c26bc96a54\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb7df64556e877b9dd56be5e97103abc8aa8b28a43b4a5389d0f6e2489057cf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc06f87c8ea0744810e2b9cb7ff8bb529fc1b2133ab79d12eb8e6129accd3e18\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"-12-28 11:08:43 +0000 UTC (now=2025-11-28 11:08:59.275700323 +0000 UTC))\\\\\\\"\\\\nI1128 11:08:59.275749 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1128 11:08:59.275786 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1128 11:08:59.275797 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI1128 11:08:59.275809 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1128 11:08:59.275835 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1128 11:08:59.275852 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764328134\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764328133\\\\\\\\\\\\\\\" (2025-11-28 10:08:53 +0000 UTC to 2026-11-28 10:08:53 +0000 UTC (now=2025-11-28 11:08:59.275832266 +0000 UTC))\\\\\\\"\\\\nI1128 11:08:59.275869 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1128 11:08:59.275889 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1128 11:08:59.275902 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1128 11:08:59.275909 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1128 11:08:59.275921 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1128 11:08:59.275909 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2723273528/tls.crt::/tmp/serving-cert-2723273528/tls.key\\\\\\\"\\\\nF1128 11:08:59.278169 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:43Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c6f085f1fd5a1ed6abe0727d6a94c95fb1b97a9f00a0dc157f62f68698c25ba9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:13Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.445142 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-68dth_08e03349-56fc-4b2d-93d3-cf2405a4b7ad/ovnkube-controller/1.log" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.447228 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c1e1dcf5efd54a3e3546460813ddc68dae027e669a19eeef6af7246b385ed21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:13Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.447869 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-68dth_08e03349-56fc-4b2d-93d3-cf2405a4b7ad/ovnkube-controller/0.log" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.451905 4923 generic.go:334] "Generic (PLEG): container finished" podID="08e03349-56fc-4b2d-93d3-cf2405a4b7ad" containerID="bbada82e4374cddba43b2570877b1a338c03bcc8b3691cb2cfce9c5e59d8f271" exitCode=1 Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.451966 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" event={"ID":"08e03349-56fc-4b2d-93d3-cf2405a4b7ad","Type":"ContainerDied","Data":"bbada82e4374cddba43b2570877b1a338c03bcc8b3691cb2cfce9c5e59d8f271"} Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.452035 4923 scope.go:117] "RemoveContainer" containerID="18927b205b4749967b7844a91e7f60621e025df765f43689d07e9d95e0758f35" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.452811 4923 scope.go:117] "RemoveContainer" containerID="bbada82e4374cddba43b2570877b1a338c03bcc8b3691cb2cfce9c5e59d8f271" Nov 28 11:09:13 crc kubenswrapper[4923]: E1128 11:09:13.453013 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-68dth_openshift-ovn-kubernetes(08e03349-56fc-4b2d-93d3-cf2405a4b7ad)\"" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" podUID="08e03349-56fc-4b2d-93d3-cf2405a4b7ad" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.461415 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:13Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.475562 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready 
status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:13Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.482537 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.482586 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.482598 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.482620 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.482632 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:13Z","lastTransitionTime":"2025-11-28T11:09:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.490110 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:13Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.504132 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"092566f7-fc7d-4897-a1f2-4ecedcd3058e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5e3ad6f76cbc3a3e771dc55c8711f153c18c1c96798a89e0f20b1ff06041129c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nthb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e0494fbf37786a6c8b1524ab2642c29343c3cfef308a6f0988d59f375d732a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nthb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-bwdth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:13Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.528834 4923 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ee3c047cb59b98c8394618e6194fc477b983a7039581951378c69698b307ee7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3c01dc5b138b3d245898dd4a01c5e81350afe6fabfe9e0333589cd9439d4017\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88bb4ac52c4706ca3d80080efb31eff071b89651d1a474b4c0c11ed5559ee7a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7b206747c810fe48a3d4269cdf80dce693f2d075510aabb42ef2c6dbbea97e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7489bfb225a27d96b70124820fb1924580c08b3355ef948335f881d7646a8a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bc7c6e0b076f04ba7810c82578147a9a3af59d3393e8effb111c299583aa6de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bbada82e4374cddba43b2570877b1a338c03bcc8b3691cb2cfce9c5e59d8f271\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18927b205b4749967b7844a91e7f60621e025df765f43689d07e9d95e0758f35\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T11:09:09Z\\\",\\\"message\\\":\\\".574516 6144 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1128 11:09:09.575176 6144 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1128 11:09:09.574471 6144 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 11:09:09.574693 6144 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1128 11:09:09.574655 6144 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 11:09:09.577244 6144 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1128 11:09:09.577365 6144 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1128 11:09:09.577433 6144 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1128 11:09:09.577518 6144 factory.go:656] Stopping watch factory\\\\nI1128 11:09:09.577576 6144 ovnkube.go:599] Stopped ovnkube\\\\nI1128 11:09:09.577652 6144 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1128 11:09:09.577709 6144 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1128 
11:09:0\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:07Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c2e3f2c83ec1b586a9478fb8d23caccab36a0fe08a3f0907a7b0cb2e67af65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\
\"containerID\\\":\\\"cri-o://18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-68dth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:13Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.544511 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-766k2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"69fcf39a-3416-4733-a55a-043d5286f8ac\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14683c7234bd497157ffe1097cd1eee097e5dd0a9e53a3e39813bc75890961b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dnr6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-766k2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:13Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.561431 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:13Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.575108 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"092566f7-fc7d-4897-a1f2-4ecedcd3058e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5e3ad6f76cbc3a3e771dc55c8711f153c18c1c96798a89e0f20b1ff06041129c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nthb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":
\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e0494fbf37786a6c8b1524ab2642c29343c3cfef308a6f0988d59f375d732a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nthb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-bwdth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:13Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.585179 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.585241 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.585260 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.585287 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.585307 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:13Z","lastTransitionTime":"2025-11-28T11:09:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.590512 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-8klhg" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.599944 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ee3c047cb59b98c8394618e6194fc477b983a7039581951378c69698b307ee7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3c01dc5b138b3d245898dd4a01c5e81350afe6fabfe9e0333589cd9439d4017\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"rec
ursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88bb4ac52c4706ca3d80080efb31eff071b89651d1a474b4c0c11ed5559ee7a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7b206747c810fe48a3d4269cdf80dce693f2d075510aabb42ef2c6dbbea97e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7489bfb225a27d96b70124820fb1924580c08b3355ef948335f881d7646a8a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bc7c6e0b076f04ba7810c82578147a9a3af59d3393e8effb111c299583aa6de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453
265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bbada82e4374cddba43b2570877b1a338c03bcc8b3691cb2cfce9c5e59d8f271\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18927b205b4749967b7844a91e7f60621e025df765f43689d07e9d95e0758f35\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T11:09:09Z\\\",\\\"message\\\":\\\".574516 6144 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1128 11:09:09.575176 6144 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1128 11:09:09.574471 6144 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 11:09:09.574693 6144 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1128 11:09:09.574655 6144 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 11:09:09.577244 6144 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1128 11:09:09.577365 6144 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1128 11:09:09.577433 6144 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1128 11:09:09.577518 6144 factory.go:656] Stopping watch factory\\\\nI1128 11:09:09.577576 6144 ovnkube.go:599] Stopped ovnkube\\\\nI1128 11:09:09.577652 6144 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1128 11:09:09.577709 6144 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1128 
11:09:0\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:07Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bbada82e4374cddba43b2570877b1a338c03bcc8b3691cb2cfce9c5e59d8f271\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T11:09:12Z\\\",\\\"message\\\":\\\"tors_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.189:50051:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {d389393c-7ba9-422c-b3f5-06e391d537d2}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1128 11:09:11.808965 6262 obj_retry.go:409] Going to retry *v1.Pod resource setup for 10 objects: [openshift-kube-apiserver/kube-apiserver-crc openshift-multus/multus-additional-cni-plugins-9gjj9 openshift-network-console/networking-console-plugin-85b44fc459-gdk6g openshift-network-operator/network-operator-58b4c7f79c-55gtf openshift-image-registry/node-ca-9qvkm openshift-network-node-identity/network-node-identity-vrzqb openshift-machine-config-operator/machine-config-daemon-bwdth openshift-network-diagnostics/network-check-source-55646444c4-trplf openshift-network-diagnostics/network-check-target-xd92c openshift-network-operator/iptables-alerter-4ln5h]\\\\nI1128 11:09:11.809487 6262 obj_retry.go:418] Waiting for all the *v1.Pod retry setup to complete in iterateRetryResources\\\\nF1128 11:09:11.809540 6262 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to 
create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c2e3f2c83ec1b586a9478fb8d23caccab36a0fe08a3f0907a7b0cb2e67af65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d
1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-68dth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:13Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.619869 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-8klhg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b1f111d9-e2b2-44b9-9592-bc5d4fef01f0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:13Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:13Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vq594\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vq594\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:13Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-8klhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:13Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.638505 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c83fada-ddb5-4acd-99c4-74d9f42e6250\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eece6b2154126c64202c6cb5a8b2953275ed2dc75e76fef6aaf2c4b82a1979f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28093276aebb4751d979649c4ced86f500308d0d4dde397771c0e1e968250ec8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28ae91e6197ea506c337abdbce14a048856e6bda9b35c5de922904c26bc96a54\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb7df64556e877b9dd56be5e97103abc8aa8b28a43b4a5389d0f6e2489057cf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc06f87c8ea0744810e2b9cb7ff8bb529fc1b2133ab79d12eb8e6129accd3e18\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"-12-28 11:08:43 +0000 UTC (now=2025-11-28 11:08:59.275700323 +0000 UTC))\\\\\\\"\\\\nI1128 11:08:59.275749 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1128 11:08:59.275786 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1128 11:08:59.275797 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI1128 11:08:59.275809 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1128 11:08:59.275835 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1128 11:08:59.275852 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764328134\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764328133\\\\\\\\\\\\\\\" (2025-11-28 10:08:53 +0000 UTC to 2026-11-28 10:08:53 +0000 UTC (now=2025-11-28 11:08:59.275832266 +0000 UTC))\\\\\\\"\\\\nI1128 11:08:59.275869 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1128 11:08:59.275889 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1128 11:08:59.275902 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1128 11:08:59.275909 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1128 11:08:59.275921 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1128 11:08:59.275909 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2723273528/tls.crt::/tmp/serving-cert-2723273528/tls.key\\\\\\\"\\\\nF1128 11:08:59.278169 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:43Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c6f085f1fd5a1ed6abe0727d6a94c95fb1b97a9f00a0dc157f62f68698c25ba9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:13Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.660490 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c1e1dcf5efd54a3e3546460813ddc68dae027e669a19eeef6af7246b385ed21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:13Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.680448 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:13Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.688483 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.688528 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.688540 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.688561 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.688577 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:13Z","lastTransitionTime":"2025-11-28T11:09:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.699584 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:13Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.712679 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-766k2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"69fcf39a-3416-4733-a55a-043d5286f8ac\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14683c7234bd497157ffe1097cd1eee097e5dd0a9e53a3e39813bc75890961b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dnr6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-766k2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:13Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.734121 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-9gjj9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b5d7899933378350cf0b863d44216aa3d87b7343f144dcab3470ee44370de0a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27143610133e2bc3e2aa453a394a9f65fcdeb97a45221a239dd490029e5a3184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://27143610133e2bc3e2aa453a394a9f65fcdeb97a45221a239dd490029e5a3184\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79f89c182f50622044f3978965cb214c601f6de4cddc96eaa118f532b2864276\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://79f89c182f50622044f3978965cb214c601f6de4cddc96eaa118f532b2864276\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7996a8b1d06ca35a2ee6c89edc2eaa7e45a6084ab54ff0caaa091c763d3cd47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b7996a8b1d06ca35a2ee6c89edc2eaa7e45a6084ab54ff0caaa091c763d3cd47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62d8385e1aa47815f9084d28d70dae899c80019ce59f5725455c594a31c97f22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62d8385e1aa47815f9084d28d70dae899c80019ce59f5725455c594a31c97f22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f6b2e1bc9f8f538d0973d9b1726d2c105d61fcd559df3ab8a2ec77b2d8f44a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f6b2e1bc9f8f538d0973d9b1726d2c105d61fcd559df3ab8a2ec77b2d8f44a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a368daf98912d176b66d5aba37e5e91937fbee8c7bd7ce6658993668c8e1525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9a368daf98912d176b66d5aba37e5e91937fbee8c7bd7ce6658993668c8e1525\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-9gjj9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:13Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.747303 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9qvkm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf32d1c9-4639-48a9-b972-c9ad6daec543\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee259c68571ed9e58d29ab09558dea3cdcc89ebfb898d6f27e896cb0d80665bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnwc6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:03Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9qvkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:13Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.764878 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdcd87eab93f0216a48bbd6038ca2bc510b7b36f895bf66de15084be62a9a0e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa3a1d3e4297edce49cfd44925fbd1cb0d51752581df9a406042cc1da6f87121\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:13Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.781830 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0d288688a32f135820030d0816b0e9567100a4732e99c41c8b7f05374c8251f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:13Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.792292 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.792372 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.792396 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.792430 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.792453 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:13Z","lastTransitionTime":"2025-11-28T11:09:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.804779 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"faf07f1a-1aa1-4e4a-b93d-739f0a9f1012\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f7b3757e1d1a5295909db644a475e35e9f9826cd7382a5a3eba86b4a76ac04d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f83e92b35264fccdd516d857e5a574a7156f7615b643691b6f8694daa38089b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8841f44f1d4af0e73960ce1c7ac5a4da352f85f6b3637315faa716d853be3277\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc960423fd7ee0a6231020982f5b932a6a2d7d0515d6f6df503d6c5d51b82096\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:13Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.823447 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-h5s2m" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"84374038-67ce-4dc0-a2c2-6eed9650c604\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://addcc8dd720a66b5089f7fa541a454de2be862cc524d1f8e4c948059ef70e20f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8z7ts\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-h5s2m\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:13Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.895315 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.895372 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.895391 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.895413 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.895429 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:13Z","lastTransitionTime":"2025-11-28T11:09:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.997709 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.997749 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.997766 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.997788 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:13 crc kubenswrapper[4923]: I1128 11:09:13.997805 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:13Z","lastTransitionTime":"2025-11-28T11:09:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:14 crc kubenswrapper[4923]: W1128 11:09:14.035785 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb1f111d9_e2b2_44b9_9592_bc5d4fef01f0.slice/crio-1ad94a4a0f6511787c778bbb0c4ca1474c0f4265496164e277c9344cd94ac830 WatchSource:0}: Error finding container 1ad94a4a0f6511787c778bbb0c4ca1474c0f4265496164e277c9344cd94ac830: Status 404 returned error can't find the container with id 1ad94a4a0f6511787c778bbb0c4ca1474c0f4265496164e277c9344cd94ac830 Nov 28 11:09:14 crc kubenswrapper[4923]: I1128 11:09:14.100686 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:14 crc kubenswrapper[4923]: I1128 11:09:14.100750 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:14 crc kubenswrapper[4923]: I1128 11:09:14.100770 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:14 crc kubenswrapper[4923]: I1128 11:09:14.100803 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:14 crc kubenswrapper[4923]: I1128 11:09:14.100824 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:14Z","lastTransitionTime":"2025-11-28T11:09:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:14 crc kubenswrapper[4923]: I1128 11:09:14.204111 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:14 crc kubenswrapper[4923]: I1128 11:09:14.204182 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:14 crc kubenswrapper[4923]: I1128 11:09:14.204205 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:14 crc kubenswrapper[4923]: I1128 11:09:14.204260 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:14 crc kubenswrapper[4923]: I1128 11:09:14.204289 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:14Z","lastTransitionTime":"2025-11-28T11:09:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:14 crc kubenswrapper[4923]: I1128 11:09:14.308491 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:14 crc kubenswrapper[4923]: I1128 11:09:14.308556 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:14 crc kubenswrapper[4923]: I1128 11:09:14.308573 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:14 crc kubenswrapper[4923]: I1128 11:09:14.308597 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:14 crc kubenswrapper[4923]: I1128 11:09:14.308615 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:14Z","lastTransitionTime":"2025-11-28T11:09:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:14 crc kubenswrapper[4923]: I1128 11:09:14.413888 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:14 crc kubenswrapper[4923]: I1128 11:09:14.414369 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:14 crc kubenswrapper[4923]: I1128 11:09:14.414690 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:14 crc kubenswrapper[4923]: I1128 11:09:14.415164 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:14 crc kubenswrapper[4923]: I1128 11:09:14.415376 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:14Z","lastTransitionTime":"2025-11-28T11:09:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:14 crc kubenswrapper[4923]: I1128 11:09:14.465577 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-68dth_08e03349-56fc-4b2d-93d3-cf2405a4b7ad/ovnkube-controller/1.log" Nov 28 11:09:14 crc kubenswrapper[4923]: I1128 11:09:14.476348 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-8klhg" event={"ID":"b1f111d9-e2b2-44b9-9592-bc5d4fef01f0","Type":"ContainerStarted","Data":"69bb796e49d5ca00e472f027f1443316695a4e243faff1eec26bc13d67bbc60a"} Nov 28 11:09:14 crc kubenswrapper[4923]: I1128 11:09:14.476636 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-8klhg" event={"ID":"b1f111d9-e2b2-44b9-9592-bc5d4fef01f0","Type":"ContainerStarted","Data":"1ad94a4a0f6511787c778bbb0c4ca1474c0f4265496164e277c9344cd94ac830"} Nov 28 11:09:14 crc kubenswrapper[4923]: I1128 11:09:14.519479 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:14 crc kubenswrapper[4923]: I1128 11:09:14.519532 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:14 crc kubenswrapper[4923]: I1128 11:09:14.519550 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:14 crc kubenswrapper[4923]: I1128 11:09:14.519576 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:14 crc kubenswrapper[4923]: I1128 11:09:14.519595 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:14Z","lastTransitionTime":"2025-11-28T11:09:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:14 crc kubenswrapper[4923]: I1128 11:09:14.622624 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:14 crc kubenswrapper[4923]: I1128 11:09:14.622669 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:14 crc kubenswrapper[4923]: I1128 11:09:14.622687 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:14 crc kubenswrapper[4923]: I1128 11:09:14.622711 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:14 crc kubenswrapper[4923]: I1128 11:09:14.622727 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:14Z","lastTransitionTime":"2025-11-28T11:09:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:14 crc kubenswrapper[4923]: I1128 11:09:14.725386 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:14 crc kubenswrapper[4923]: I1128 11:09:14.725430 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:14 crc kubenswrapper[4923]: I1128 11:09:14.725446 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:14 crc kubenswrapper[4923]: I1128 11:09:14.725463 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:14 crc kubenswrapper[4923]: I1128 11:09:14.725475 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:14Z","lastTransitionTime":"2025-11-28T11:09:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:14 crc kubenswrapper[4923]: I1128 11:09:14.820077 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-g2kmb"] Nov 28 11:09:14 crc kubenswrapper[4923]: I1128 11:09:14.821015 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-g2kmb" Nov 28 11:09:14 crc kubenswrapper[4923]: E1128 11:09:14.821137 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-g2kmb" podUID="b483d037-b692-45d5-bb83-02e029649100" Nov 28 11:09:14 crc kubenswrapper[4923]: I1128 11:09:14.841882 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-766k2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69fcf39a-3416-4733-a55a-043d5286f8ac\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14683c7234bd497157ffe1097cd1eee097e5dd0a9e53a3e39813bc75890961b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dnr6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-766k2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:14Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:14 crc kubenswrapper[4923]: I1128 11:09:14.860888 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdcd87eab93f0216a48bbd6038ca2bc510b7b36f895bf66de15084be62a9a0e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa3a1d3e4297edce49cfd44925fbd1cb0d51752581df9a406042cc1da6f87121\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:14Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:14 crc kubenswrapper[4923]: I1128 11:09:14.879025 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0d288688a32f135820030d0816b0e9567100a4732e99c41c8b7f05374c8251f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:14Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:14 crc kubenswrapper[4923]: I1128 11:09:14.900368 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-9gjj9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b5d7899933378350cf0b863d44216aa3d87b7343f144dcab3470ee44370de0a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27143610133e2bc3e2aa453a394a9f65fcdeb97a45221a239dd490029e5a3184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://27143610133e2bc3e2aa453a394a9f65fcdeb97a45221a239dd490029e5a3184\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79f89c182f50622044f3978965cb214c601f6de4cddc96eaa118f532b2864276\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://79f89c182f50622044f3978965cb214c601f6de4cddc96eaa118f532b2864276\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7996a8b1d06ca35a2ee6c89edc2eaa7e45a6084ab54ff0caaa091c763d3cd47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b7996a8b1d06ca35a2ee6c89edc2eaa7e45a6084ab54ff0caaa091c763d3cd47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62d8385e1aa47815f9084d28d70dae899c80019ce59f5725455c594a31c97f22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62d8385e1aa47815f9084d28d70dae899c80019ce59f5725455c594a31c97f22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f6b2e1bc9f8f538d0973d9b1726d2c105d61fcd559df3ab8a2ec77b2d8f44a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f6b2e1bc9f8f538d0973d9b1726d2c105d61fcd559df3ab8a2ec77b2d8f44a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a368daf98912d176b66d5aba37e5e91937fbee8c7bd7ce6658993668c8e1525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9a368daf98912d176b66d5aba37e5e91937fbee8c7bd7ce6658993668c8e1525\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-9gjj9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:14Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:14 crc kubenswrapper[4923]: I1128 11:09:14.907855 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: 
\"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 11:09:14 crc kubenswrapper[4923]: I1128 11:09:14.908211 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 11:09:14 crc kubenswrapper[4923]: I1128 11:09:14.908396 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gmpxf\" (UniqueName: \"kubernetes.io/projected/b483d037-b692-45d5-bb83-02e029649100-kube-api-access-gmpxf\") pod \"network-metrics-daemon-g2kmb\" (UID: \"b483d037-b692-45d5-bb83-02e029649100\") " pod="openshift-multus/network-metrics-daemon-g2kmb" Nov 28 11:09:14 crc kubenswrapper[4923]: I1128 11:09:14.908718 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b483d037-b692-45d5-bb83-02e029649100-metrics-certs\") pod \"network-metrics-daemon-g2kmb\" (UID: \"b483d037-b692-45d5-bb83-02e029649100\") " pod="openshift-multus/network-metrics-daemon-g2kmb" Nov 28 11:09:14 crc kubenswrapper[4923]: I1128 11:09:14.908902 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 11:09:14 crc kubenswrapper[4923]: E1128 11:09:14.909175 4923 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 11:09:14 crc kubenswrapper[4923]: E1128 11:09:14.909380 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 11:09:30.909358956 +0000 UTC m=+50.038043206 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 11:09:14 crc kubenswrapper[4923]: E1128 11:09:14.909967 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 11:09:30.909925432 +0000 UTC m=+50.038609682 (durationBeforeRetry 16s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 11:09:14 crc kubenswrapper[4923]: E1128 11:09:14.910218 4923 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 11:09:14 crc kubenswrapper[4923]: E1128 11:09:14.910392 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 11:09:30.910375025 +0000 UTC m=+50.039059265 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 11:09:14 crc kubenswrapper[4923]: I1128 11:09:14.915367 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9qvkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf32d1c9-4639-48a9-b972-c9ad6daec543\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee259c68571ed9e58d29ab09558dea3cdcc89ebfb898d6f27e896cb0d80665bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnwc6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\
\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:03Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9qvkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:14Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:14 crc kubenswrapper[4923]: I1128 11:09:14.929722 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-g2kmb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b483d037-b692-45d5-bb83-02e029649100\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmpxf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmpxf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:14Z\\\"}}\" for pod 
\"openshift-multus\"/\"network-metrics-daemon-g2kmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:14Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:14 crc kubenswrapper[4923]: I1128 11:09:14.945538 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"faf07f1a-1aa1-4e4a-b93d-739f0a9f1012\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f7b3757e1d1a5295909db644a475e35e9f9826cd7382a5a3eba86b4a76ac04d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f83e92b35264fccdd516d857e5a574a7156f7615b643691b6f8694daa38089b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8841f44f1d4af0e73960ce1c7ac5a4da352f85f6b3637315faa716d853be3277\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\
\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc960423fd7ee0a6231020982f5b932a6a2d7d0515d6f6df503d6c5d51b82096\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:14Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:14 crc kubenswrapper[4923]: I1128 11:09:14.962895 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-h5s2m" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"84374038-67ce-4dc0-a2c2-6eed9650c604\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://addcc8dd720a66b5089f7fa541a454de2be862cc524d1f8e4c948059ef70e20f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8z7ts\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-h5s2m\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:14Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:14 crc kubenswrapper[4923]: I1128 11:09:14.976999 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-8klhg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b1f111d9-e2b2-44b9-9592-bc5d4fef01f0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:13Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:13Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vq594\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vq594\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:13Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-8klhg\": Internal error occurred: 
failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:14Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:14 crc kubenswrapper[4923]: I1128 11:09:14.994886 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c83fada-ddb5-4acd-99c4-74d9f42e6250\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eece6b2154126c64202c6cb5a8b2953275ed2dc75e76fef6aaf2c4b82a1979f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28093276aebb4751d979649c4ced86f500308d0d4dde397771c0e1e968250ec8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28ae91e6197ea506c337abdbce14a048856e6bda9b35c5de922904c26bc96a54\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0
,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb7df64556e877b9dd56be5e97103abc8aa8b28a43b4a5389d0f6e2489057cf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc06f87c8ea0744810e2b9cb7ff8bb529fc1b2133ab79d12eb8e6129accd3e18\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"-12-28 11:08:43 +0000 UTC (now=2025-11-28 11:08:59.275700323 +0000 UTC))\\\\\\\"\\\\nI1128 11:08:59.275749 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1128 11:08:59.275786 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1128 11:08:59.275797 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI1128 11:08:59.275809 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1128 11:08:59.275835 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1128 11:08:59.275852 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764328134\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764328133\\\\\\\\\\\\\\\" (2025-11-28 10:08:53 +0000 UTC to 2026-11-28 10:08:53 +0000 UTC (now=2025-11-28 11:08:59.275832266 +0000 UTC))\\\\\\\"\\\\nI1128 11:08:59.275869 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1128 11:08:59.275889 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1128 11:08:59.275902 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1128 11:08:59.275909 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1128 11:08:59.275921 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1128 11:08:59.275909 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2723273528/tls.crt::/tmp/serving-cert-2723273528/tls.key\\\\\\\"\\\\nF1128 11:08:59.278169 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:43Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c6f085f1fd5a1ed6abe0727d6a94c95fb1b97a9f00a0dc157f62f68698c25ba9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:14Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:15 crc kubenswrapper[4923]: I1128 11:09:15.009698 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 11:09:15 crc kubenswrapper[4923]: I1128 11:09:15.010046 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 11:09:15 crc kubenswrapper[4923]: I1128 11:09:15.010328 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gmpxf\" (UniqueName: \"kubernetes.io/projected/b483d037-b692-45d5-bb83-02e029649100-kube-api-access-gmpxf\") pod \"network-metrics-daemon-g2kmb\" (UID: \"b483d037-b692-45d5-bb83-02e029649100\") " pod="openshift-multus/network-metrics-daemon-g2kmb" Nov 28 11:09:15 crc kubenswrapper[4923]: I1128 11:09:15.010558 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b483d037-b692-45d5-bb83-02e029649100-metrics-certs\") pod \"network-metrics-daemon-g2kmb\" (UID: \"b483d037-b692-45d5-bb83-02e029649100\") " pod="openshift-multus/network-metrics-daemon-g2kmb" Nov 28 11:09:15 crc kubenswrapper[4923]: E1128 11:09:15.011012 4923 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 11:09:15 crc kubenswrapper[4923]: E1128 11:09:15.011284 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b483d037-b692-45d5-bb83-02e029649100-metrics-certs podName:b483d037-b692-45d5-bb83-02e029649100 nodeName:}" failed. No retries permitted until 2025-11-28 11:09:15.511258678 +0000 UTC m=+34.639942918 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/b483d037-b692-45d5-bb83-02e029649100-metrics-certs") pod "network-metrics-daemon-g2kmb" (UID: "b483d037-b692-45d5-bb83-02e029649100") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 11:09:15 crc kubenswrapper[4923]: E1128 11:09:15.011845 4923 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 11:09:15 crc kubenswrapper[4923]: E1128 11:09:15.012051 4923 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 11:09:15 crc kubenswrapper[4923]: E1128 11:09:15.012224 4923 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 11:09:15 crc kubenswrapper[4923]: E1128 11:09:15.012402 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-28 11:09:31.012383629 +0000 UTC m=+50.141067869 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 11:09:15 crc kubenswrapper[4923]: E1128 11:09:15.012624 4923 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 11:09:15 crc kubenswrapper[4923]: E1128 11:09:15.012761 4923 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 11:09:15 crc kubenswrapper[4923]: E1128 11:09:15.012882 4923 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 11:09:15 crc kubenswrapper[4923]: E1128 11:09:15.013070 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-28 11:09:31.013052948 +0000 UTC m=+50.141737188 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 11:09:15 crc kubenswrapper[4923]: I1128 11:09:15.014345 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c1e1dcf5efd54a3e3546460813ddc68dae027e669a19eeef6af7246b385ed21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:15Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:15 crc kubenswrapper[4923]: I1128 11:09:15.042003 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gmpxf\" (UniqueName: \"kubernetes.io/projected/b483d037-b692-45d5-bb83-02e029649100-kube-api-access-gmpxf\") pod \"network-metrics-daemon-g2kmb\" (UID: \"b483d037-b692-45d5-bb83-02e029649100\") " pod="openshift-multus/network-metrics-daemon-g2kmb" Nov 28 11:09:15 crc kubenswrapper[4923]: I1128 11:09:15.042341 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:15Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:15 crc kubenswrapper[4923]: I1128 11:09:15.059973 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:15Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:15 crc kubenswrapper[4923]: I1128 11:09:15.076448 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:15Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:15 crc kubenswrapper[4923]: I1128 11:09:15.090476 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"092566f7-fc7d-4897-a1f2-4ecedcd3058e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5e3ad6f76cbc3a3e771dc55c8711f153c18c1c96798a89e0f20b1ff06041129c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nthb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e0494fbf37786a6c8b1524ab2642c29343c3cfef308a6f0988d59f375d732a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\
\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nthb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-bwdth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:15Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:15 crc kubenswrapper[4923]: I1128 11:09:15.116478 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ee3c047cb59b98c8394618e6194fc477b983a7039581951378c69698b307ee7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3c01dc5b138b3d245898dd4a01c5e81350afe6fabfe9e0333589cd9439d4017\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88bb4ac52c4706ca3d80080efb31eff071b89651d1a474b4c0c11ed5559ee7a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7b206747c810fe48a3d4269cdf80dce693f2d075510aabb42ef2c6dbbea97e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7489bfb225a27d96b70124820fb1924580c08b3355ef948335f881d7646a8a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bc7c6e0b076f04ba7810c82578147a9a3af59d3393e8effb111c299583aa6de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bbada82e4374cddba43b2570877b1a338c03bcc8
b3691cb2cfce9c5e59d8f271\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18927b205b4749967b7844a91e7f60621e025df765f43689d07e9d95e0758f35\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T11:09:09Z\\\",\\\"message\\\":\\\".574516 6144 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1128 11:09:09.575176 6144 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1128 11:09:09.574471 6144 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 11:09:09.574693 6144 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1128 11:09:09.574655 6144 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 11:09:09.577244 6144 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1128 11:09:09.577365 6144 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1128 11:09:09.577433 6144 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1128 11:09:09.577518 6144 factory.go:656] Stopping watch factory\\\\nI1128 11:09:09.577576 6144 ovnkube.go:599] Stopped ovnkube\\\\nI1128 11:09:09.577652 6144 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1128 11:09:09.577709 6144 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1128 11:09:0\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:07Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bbada82e4374cddba43b2570877b1a338c03bcc8b3691cb2cfce9c5e59d8f271\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T11:09:12Z\\\",\\\"message\\\":\\\"tors_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.189:50051:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {d389393c-7ba9-422c-b3f5-06e391d537d2}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1128 11:09:11.808965 6262 obj_retry.go:409] Going to retry *v1.Pod resource setup for 10 objects: [openshift-kube-apiserver/kube-apiserver-crc openshift-multus/multus-additional-cni-plugins-9gjj9 openshift-network-console/networking-console-plugin-85b44fc459-gdk6g openshift-network-operator/network-operator-58b4c7f79c-55gtf openshift-image-registry/node-ca-9qvkm openshift-network-node-identity/network-node-identity-vrzqb openshift-machine-config-operator/machine-config-daemon-bwdth openshift-network-diagnostics/network-check-source-55646444c4-trplf openshift-network-diagnostics/network-check-target-xd92c openshift-network-operator/iptables-alerter-4ln5h]\\\\nI1128 11:09:11.809487 6262 obj_retry.go:418] Waiting for all the *v1.Pod retry 
setup to complete in iterateRetryResources\\\\nF1128 11:09:11.809540 6262 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c2e3f2c83ec1b586a9478fb8d23caccab36a0fe08a3f0907a7b0cb2e67af65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContaine
rStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-68dth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:15Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:15 crc kubenswrapper[4923]: I1128 11:09:15.494511 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" Nov 28 11:09:15 crc kubenswrapper[4923]: I1128 11:09:15.495721 4923 scope.go:117] "RemoveContainer" containerID="bbada82e4374cddba43b2570877b1a338c03bcc8b3691cb2cfce9c5e59d8f271" Nov 28 11:09:15 crc kubenswrapper[4923]: E1128 11:09:15.496060 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-68dth_openshift-ovn-kubernetes(08e03349-56fc-4b2d-93d3-cf2405a4b7ad)\"" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" podUID="08e03349-56fc-4b2d-93d3-cf2405a4b7ad" Nov 28 11:09:15 crc kubenswrapper[4923]: I1128 11:09:15.509819 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-766k2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"69fcf39a-3416-4733-a55a-043d5286f8ac\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14683c7234bd497157ffe1097cd1eee097e5dd0a9e53a3e39813bc75890961b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dnr6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-766k2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:15Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:15 crc kubenswrapper[4923]: I1128 11:09:15.516735 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b483d037-b692-45d5-bb83-02e029649100-metrics-certs\") pod \"network-metrics-daemon-g2kmb\" (UID: \"b483d037-b692-45d5-bb83-02e029649100\") " pod="openshift-multus/network-metrics-daemon-g2kmb" Nov 28 11:09:15 crc kubenswrapper[4923]: E1128 11:09:15.518694 4923 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 11:09:15 crc kubenswrapper[4923]: E1128 11:09:15.518778 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b483d037-b692-45d5-bb83-02e029649100-metrics-certs podName:b483d037-b692-45d5-bb83-02e029649100 nodeName:}" failed. No retries permitted until 2025-11-28 11:09:16.518754417 +0000 UTC m=+35.647438667 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/b483d037-b692-45d5-bb83-02e029649100-metrics-certs") pod "network-metrics-daemon-g2kmb" (UID: "b483d037-b692-45d5-bb83-02e029649100") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 11:09:15 crc kubenswrapper[4923]: I1128 11:09:15.529578 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-9gjj9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b5d7899933378350cf0b863d44216aa3d87b7343f144dcab3470ee44370de0a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27143610133e2bc3e2aa453a394a9f65fcdeb97a45221a239dd490029e5a3184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://27143610133e2bc3e2aa453a394a9f65fcdeb97a45221a239dd490029e5a3184\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/
var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79f89c182f50622044f3978965cb214c601f6de4cddc96eaa118f532b2864276\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://79f89c182f50622044f3978965cb214c601f6de4cddc96eaa118f532b2864276\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7996a8b1d06ca35a2ee6c89edc2eaa7e45a6084ab54ff0caaa091c763d3cd47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b7996a8b1d06ca35a2ee6c89edc2eaa7e45a6084ab54ff0caaa091c763d3cd47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62d8385e1aa47815f9084d28d70dae899c80019ce59f5725455c594a31c97f22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62d8385e1aa47815f9084d28d70dae899c80019ce59f5725455c594a31c97f22
\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f6b2e1bc9f8f538d0973d9b1726d2c105d61fcd559df3ab8a2ec77b2d8f44a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f6b2e1bc9f8f538d0973d9b1726d2c105d61fcd559df3ab8a2ec77b2d8f44a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a368daf98912d176b66d5aba37e5e91937fbee8c7bd7ce6658993668c8e1525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9a368daf98912d176b66d5aba37e5e91937fbee8c7bd7ce6658993668c8e1525\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-9gjj9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:15Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:15 crc kubenswrapper[4923]: I1128 11:09:15.543302 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9qvkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf32d1c9-4639-48a9-b972-c9ad6daec543\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee259c68571ed9e58d29ab09558dea3cdcc89ebfb898d6f27e896cb0d80665bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnwc6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:03Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9qvkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:15Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:15 crc kubenswrapper[4923]: I1128 11:09:15.558388 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-g2kmb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b483d037-b692-45d5-bb83-02e029649100\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmpxf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmpxf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:14Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-g2kmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:15Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:15 crc kubenswrapper[4923]: I1128 11:09:15.574801 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdcd87eab93f0216a48bbd6038ca2bc510b7b36f895bf66de15084be62a9a0e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa3a1d3e4297edce49cfd44925fbd1cb0d51752581df9a406042cc1da6f87121\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:15Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:15 crc kubenswrapper[4923]: I1128 11:09:15.591980 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0d288688a32f135820030d0816b0e9567100a4732e99c41c8b7f05374c8251f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:15Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:15 crc kubenswrapper[4923]: I1128 11:09:15.609433 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"faf07f1a-1aa1-4e4a-b93d-739f0a9f1012\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f7b3757e1d1a5295909db644a475e35e9f9826cd7382a5a3eba86b4a76ac04d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f83e92b35264fccdd516d857e5a574a7156f7615b643691b6f8694daa38089b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8841f44f1d4af0e73960ce1c7ac5a4da352f85f6b3637315faa716d853be3277\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc960423fd7ee0a6231020982f5b932a6a2d7d0515d6f6df503d6c5d51b82096\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:15Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:15 crc kubenswrapper[4923]: I1128 11:09:15.627434 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-h5s2m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"84374038-67ce-4dc0-a2c2-6eed9650c604\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://addcc8dd720a66b5089f7fa541a454de2be862cc524d1f8e4c948059ef70e20f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run
/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8z7ts\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-h5s2m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:15Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:15 crc kubenswrapper[4923]: I1128 11:09:15.648501 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:15Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:15 crc kubenswrapper[4923]: I1128 11:09:15.666680 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"092566f7-fc7d-4897-a1f2-4ecedcd3058e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5e3ad6f76cbc3a3e771dc55c8711f153c18c1c96798a89e0f20b1ff06041129c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nthb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e0494fbf37786a6c8b1524ab2642c29343c3cfef308a6f0988d59f375d732a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\
\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nthb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-bwdth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:15Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:15 crc kubenswrapper[4923]: I1128 11:09:15.693624 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ee3c047cb59b98c8394618e6194fc477b983a7039581951378c69698b307ee7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3c01dc5b138b3d245898dd4a01c5e81350afe6fabfe9e0333589cd9439d4017\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88bb4ac52c4706ca3d80080efb31eff071b89651d1a474b4c0c11ed5559ee7a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7b206747c810fe48a3d4269cdf80dce693f2d075510aabb42ef2c6dbbea97e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7489bfb225a27d96b70124820fb1924580c08b3355ef948335f881d7646a8a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bc7c6e0b076f04ba7810c82578147a9a3af59d3393e8effb111c299583aa6de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bbada82e4374cddba43b2570877b1a338c03bcc8
b3691cb2cfce9c5e59d8f271\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bbada82e4374cddba43b2570877b1a338c03bcc8b3691cb2cfce9c5e59d8f271\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T11:09:12Z\\\",\\\"message\\\":\\\"tors_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.189:50051:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {d389393c-7ba9-422c-b3f5-06e391d537d2}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1128 11:09:11.808965 6262 obj_retry.go:409] Going to retry *v1.Pod resource setup for 10 objects: [openshift-kube-apiserver/kube-apiserver-crc openshift-multus/multus-additional-cni-plugins-9gjj9 openshift-network-console/networking-console-plugin-85b44fc459-gdk6g openshift-network-operator/network-operator-58b4c7f79c-55gtf openshift-image-registry/node-ca-9qvkm openshift-network-node-identity/network-node-identity-vrzqb openshift-machine-config-operator/machine-config-daemon-bwdth openshift-network-diagnostics/network-check-source-55646444c4-trplf openshift-network-diagnostics/network-check-target-xd92c openshift-network-operator/iptables-alerter-4ln5h]\\\\nI1128 11:09:11.809487 6262 obj_retry.go:418] Waiting for all the *v1.Pod retry setup to complete in iterateRetryResources\\\\nF1128 11:09:11.809540 6262 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:10Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-68dth_openshift-ovn-kubernetes(08e03349-56fc-4b2d-93d3-cf2405a4b7ad)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c2e3f2c83ec1b586a9478fb8d23caccab36a0fe08a3f0907a7b0cb2e67af65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-68dth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:15Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:15 crc kubenswrapper[4923]: I1128 11:09:15.707871 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-8klhg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b1f111d9-e2b2-44b9-9592-bc5d4fef01f0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:13Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:13Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vq594\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vq594\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:13Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-8klhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:15Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:15 crc kubenswrapper[4923]: I1128 11:09:15.725623 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c83fada-ddb5-4acd-99c4-74d9f42e6250\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eece6b2154126c64202c6cb5a8b2953275ed2dc75e76fef6aaf2c4b82a1979f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28093276aebb4751d979649c4ced86f500308d0d4dde397771c0e1e968250ec8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28ae91e6197ea506c337abdbce14a048856e6bda9b35c5de922904c26bc96a54\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb7df64556e877b9dd56be5e97103abc8aa8b28a43b4a5389d0f6e2489057cf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc06f87c8ea0744810e2b9cb7ff8bb529fc1b2133ab79d12eb8e6129accd3e18\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"-12-28 11:08:43 +0000 UTC (now=2025-11-28 11:08:59.275700323 +0000 UTC))\\\\\\\"\\\\nI1128 11:08:59.275749 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1128 11:08:59.275786 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1128 11:08:59.275797 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI1128 11:08:59.275809 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1128 11:08:59.275835 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1128 11:08:59.275852 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764328134\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764328133\\\\\\\\\\\\\\\" (2025-11-28 10:08:53 +0000 UTC to 2026-11-28 10:08:53 +0000 UTC (now=2025-11-28 11:08:59.275832266 +0000 UTC))\\\\\\\"\\\\nI1128 11:08:59.275869 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1128 11:08:59.275889 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1128 11:08:59.275902 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1128 11:08:59.275909 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1128 11:08:59.275921 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1128 11:08:59.275909 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2723273528/tls.crt::/tmp/serving-cert-2723273528/tls.key\\\\\\\"\\\\nF1128 11:08:59.278169 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:43Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c6f085f1fd5a1ed6abe0727d6a94c95fb1b97a9f00a0dc157f62f68698c25ba9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:15Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:15 crc kubenswrapper[4923]: I1128 11:09:15.742562 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c1e1dcf5efd54a3e3546460813ddc68dae027e669a19eeef6af7246b385ed21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:15Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:15 crc kubenswrapper[4923]: I1128 11:09:15.757863 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:15Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:15 crc kubenswrapper[4923]: I1128 11:09:15.774137 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:15Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:15 crc kubenswrapper[4923]: I1128 11:09:15.993544 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 11:09:15 crc kubenswrapper[4923]: E1128 11:09:15.993734 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 11:09:15 crc kubenswrapper[4923]: I1128 11:09:15.994064 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 11:09:15 crc kubenswrapper[4923]: I1128 11:09:15.994260 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 11:09:15 crc kubenswrapper[4923]: I1128 11:09:15.998253 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:15 crc kubenswrapper[4923]: I1128 11:09:15.998437 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:15 crc kubenswrapper[4923]: I1128 11:09:15.998560 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:15 crc kubenswrapper[4923]: I1128 11:09:15.998715 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:15 crc kubenswrapper[4923]: I1128 11:09:15.998843 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:15Z","lastTransitionTime":"2025-11-28T11:09:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:16 crc kubenswrapper[4923]: E1128 11:09:16.000793 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 11:09:16 crc kubenswrapper[4923]: E1128 11:09:16.001028 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 11:09:16 crc kubenswrapper[4923]: I1128 11:09:16.006349 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-8klhg" event={"ID":"b1f111d9-e2b2-44b9-9592-bc5d4fef01f0","Type":"ContainerStarted","Data":"3f90a5608dca4e71887975960683dda08b1b5e01f598af251663a968bb7fe56c"} Nov 28 11:09:16 crc kubenswrapper[4923]: I1128 11:09:16.046712 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ee3c047cb59b98c8394618e6194fc477b983a7039581951378c69698b307ee7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3c01dc5b138b3d245898dd4a01c5e81350afe6fabfe9e0333589cd9439d4017\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88bb4ac52c4706ca3d80080efb31eff071b89651d1a474b4c0c11ed5559ee7a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7b206747c810fe48a3d4269cdf80dce693f2d075510aabb42ef2c6dbbea97e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7489bfb225a27d96b70124820fb1924580c08b3355ef948335f881d7646a8a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bc7c6e0b076f04ba7810c82578147a9a3af59d3393e8effb111c299583aa6de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bbada82e4374cddba43b2570877b1a338c03bcc8
b3691cb2cfce9c5e59d8f271\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bbada82e4374cddba43b2570877b1a338c03bcc8b3691cb2cfce9c5e59d8f271\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T11:09:12Z\\\",\\\"message\\\":\\\"tors_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.189:50051:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {d389393c-7ba9-422c-b3f5-06e391d537d2}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1128 11:09:11.808965 6262 obj_retry.go:409] Going to retry *v1.Pod resource setup for 10 objects: [openshift-kube-apiserver/kube-apiserver-crc openshift-multus/multus-additional-cni-plugins-9gjj9 openshift-network-console/networking-console-plugin-85b44fc459-gdk6g openshift-network-operator/network-operator-58b4c7f79c-55gtf openshift-image-registry/node-ca-9qvkm openshift-network-node-identity/network-node-identity-vrzqb openshift-machine-config-operator/machine-config-daemon-bwdth openshift-network-diagnostics/network-check-source-55646444c4-trplf openshift-network-diagnostics/network-check-target-xd92c openshift-network-operator/iptables-alerter-4ln5h]\\\\nI1128 11:09:11.809487 6262 obj_retry.go:418] Waiting for all the *v1.Pod retry setup to complete in iterateRetryResources\\\\nF1128 11:09:11.809540 6262 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:10Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-68dth_openshift-ovn-kubernetes(08e03349-56fc-4b2d-93d3-cf2405a4b7ad)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c2e3f2c83ec1b586a9478fb8d23caccab36a0fe08a3f0907a7b0cb2e67af65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-68dth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:16Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:16 crc kubenswrapper[4923]: I1128 11:09:16.062765 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-8klhg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b1f111d9-e2b2-44b9-9592-bc5d4fef01f0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69bb796e49d5ca00e472f027f1443316695a4e243faff1eec26bc13d67bbc60a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vq594
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f90a5608dca4e71887975960683dda08b1b5e01f598af251663a968bb7fe56c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vq594\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:13Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-8klhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:16Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:16 crc kubenswrapper[4923]: I1128 11:09:16.084103 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c83fada-ddb5-4acd-99c4-74d9f42e6250\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eece6b2154126c64202c6cb5a8b2953275ed2dc75e76fef6aaf2c4b82a1979f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28093276aebb4751d979649c4ced86f500308d0d4dde397771c0e1e968250ec8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28ae91e6197ea506c337abdbce14a048856e6bda9b35c5de922904c26bc96a54\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb7df64556e877b9dd56be5e97103abc8aa8b28a43b4a5389d0f6e2489057cf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc06f87c8ea0744810e2b9cb7ff8bb529fc1b2133ab79d12eb8e6129accd3e18\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"-12-28 11:08:43 +0000 UTC (now=2025-11-28 11:08:59.275700323 +0000 UTC))\\\\\\\"\\\\nI1128 11:08:59.275749 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1128 11:08:59.275786 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1128 11:08:59.275797 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI1128 11:08:59.275809 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1128 11:08:59.275835 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1128 11:08:59.275852 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764328134\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764328133\\\\\\\\\\\\\\\" (2025-11-28 10:08:53 +0000 UTC to 2026-11-28 10:08:53 +0000 UTC (now=2025-11-28 11:08:59.275832266 +0000 UTC))\\\\\\\"\\\\nI1128 11:08:59.275869 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1128 11:08:59.275889 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1128 11:08:59.275902 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1128 11:08:59.275909 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1128 11:08:59.275921 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1128 11:08:59.275909 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2723273528/tls.crt::/tmp/serving-cert-2723273528/tls.key\\\\\\\"\\\\nF1128 11:08:59.278169 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:43Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c6f085f1fd5a1ed6abe0727d6a94c95fb1b97a9f00a0dc157f62f68698c25ba9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:16Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:16 crc kubenswrapper[4923]: I1128 11:09:16.103200 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c1e1dcf5efd54a3e3546460813ddc68dae027e669a19eeef6af7246b385ed21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:16Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:16 crc kubenswrapper[4923]: I1128 11:09:16.103310 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:16 crc kubenswrapper[4923]: I1128 11:09:16.103481 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:16 crc kubenswrapper[4923]: I1128 11:09:16.103498 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:16 crc kubenswrapper[4923]: I1128 11:09:16.103520 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:16 crc kubenswrapper[4923]: I1128 11:09:16.103536 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:16Z","lastTransitionTime":"2025-11-28T11:09:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:16 crc kubenswrapper[4923]: I1128 11:09:16.122419 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:16Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:16 crc kubenswrapper[4923]: I1128 11:09:16.141458 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:16Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:16 crc kubenswrapper[4923]: I1128 11:09:16.160136 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:16Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:16 crc kubenswrapper[4923]: I1128 11:09:16.174658 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"092566f7-fc7d-4897-a1f2-4ecedcd3058e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5e3ad6f76cbc3a3e771dc55c8711f153c18c1c96798a89e0f20b1ff06041129c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nthb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e0494fbf37786a6c8b1524ab2642c29343c3cfef308a6f0988d59f375d732a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\
\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nthb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-bwdth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:16Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:16 crc kubenswrapper[4923]: I1128 11:09:16.186799 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-766k2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69fcf39a-3416-4733-a55a-043d5286f8ac\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14683c7234bd497157ffe1097cd1eee097e5dd0a9e53a3e39813bc75890961b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dnr6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-766k2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:16Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:16 crc kubenswrapper[4923]: I1128 11:09:16.201742 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-g2kmb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b483d037-b692-45d5-bb83-02e029649100\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmpxf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmpxf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:14Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-g2kmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:16Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:16 crc kubenswrapper[4923]: I1128 11:09:16.205925 4923 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:16 crc kubenswrapper[4923]: I1128 11:09:16.206035 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:16 crc kubenswrapper[4923]: I1128 11:09:16.206091 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:16 crc kubenswrapper[4923]: I1128 11:09:16.206113 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:16 crc kubenswrapper[4923]: I1128 11:09:16.206130 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:16Z","lastTransitionTime":"2025-11-28T11:09:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:16 crc kubenswrapper[4923]: I1128 11:09:16.220139 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdcd87eab93f0216a48bbd6038ca2bc510b7b36f895bf66de15084be62a9a0e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa3a1d3e4297edce49cfd44925fbd1cb0d51752581df9a406042cc1da6f87121\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:16Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:16 crc kubenswrapper[4923]: I1128 11:09:16.238202 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0d288688a32f135820030d0816b0e9567100a4732e99c41c8b7f05374c8251f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:16Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:16 crc kubenswrapper[4923]: I1128 11:09:16.260466 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-9gjj9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b5d7899933378350cf0b863d44216aa3d87b7343f144dcab3470ee44370de0a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27143610133e2bc3e2aa453a394a9f65fcdeb97a45221a239dd490029e5a3184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://27143610133e2bc3e2aa453a394a9f65fcdeb97a45221a239dd490029e5a3184\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79f89c182f50622044f3978965cb214c601f6de4cddc96eaa118f532b2864276\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://79f89c182f50622044f3978965cb214c601f6de4cddc96eaa118f532b2864276\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7996a8b1d06ca35a2ee6c89edc2eaa7e45a6084ab54ff0caaa091c763d3cd47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b7996a8b1d06ca35a2ee6c89edc2eaa7e45a6084ab54ff0caaa091c763d3cd47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62d8385e1aa47815f9084d28d70dae899c80019ce59f5725455c594a31c97f22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62d8385e1aa47815f9084d28d70dae899c80019ce59f5725455c594a31c97f22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f6b2e1bc9f8f538d0973d9b1726d2c105d61fcd559df3ab8a2ec77b2d8f44a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f6b2e1bc9f8f538d0973d9b1726d2c105d61fcd559df3ab8a2ec77b2d8f44a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a368daf98912d176b66d5aba37e5e91937fbee8c7bd7ce6658993668c8e1525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9a368daf98912d176b66d5aba37e5e91937fbee8c7bd7ce6658993668c8e1525\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-9gjj9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:16Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:16 crc kubenswrapper[4923]: I1128 11:09:16.274712 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9qvkm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf32d1c9-4639-48a9-b972-c9ad6daec543\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee259c68571ed9e58d29ab09558dea3cdcc89ebfb898d6f27e896cb0d80665bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnwc6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:03Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9qvkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:16Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:16 crc kubenswrapper[4923]: I1128 11:09:16.295406 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"faf07f1a-1aa1-4e4a-b93d-739f0a9f1012\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f7b3757e1d1a5295909db644a475e35e9f9826cd7382a5a3eba86b4a76ac04d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f83e92b35264fccdd516d857e5a574a7156f7615b643691b6f8694daa38089b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8841f44f1d4af0e73960ce1c7ac5a4da352f85f6b3637315faa716d853be3277\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc960423fd7ee0a6231020982f5b932a6a2d7d0515d6f6df503d6c5d51b82096\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:16Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:16 crc kubenswrapper[4923]: I1128 11:09:16.308506 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:16 crc kubenswrapper[4923]: I1128 11:09:16.308564 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:16 crc kubenswrapper[4923]: I1128 11:09:16.308583 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:16 crc kubenswrapper[4923]: I1128 11:09:16.308607 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:16 crc kubenswrapper[4923]: I1128 11:09:16.308626 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:16Z","lastTransitionTime":"2025-11-28T11:09:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:16 crc kubenswrapper[4923]: I1128 11:09:16.318031 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-h5s2m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"84374038-67ce-4dc0-a2c2-6eed9650c604\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://addcc8dd720a66b5089f7fa541a454de2be862cc524d1f8e4c948059ef70e20f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8z7ts\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-h5s2m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:16Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:16 crc kubenswrapper[4923]: I1128 11:09:16.411335 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:16 crc kubenswrapper[4923]: I1128 11:09:16.411399 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:16 crc kubenswrapper[4923]: I1128 11:09:16.411416 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:16 crc kubenswrapper[4923]: I1128 11:09:16.411441 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:16 crc kubenswrapper[4923]: I1128 11:09:16.411460 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:16Z","lastTransitionTime":"2025-11-28T11:09:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:16 crc kubenswrapper[4923]: I1128 11:09:16.514280 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:16 crc kubenswrapper[4923]: I1128 11:09:16.514330 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:16 crc kubenswrapper[4923]: I1128 11:09:16.514346 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:16 crc kubenswrapper[4923]: I1128 11:09:16.514371 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:16 crc kubenswrapper[4923]: I1128 11:09:16.514390 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:16Z","lastTransitionTime":"2025-11-28T11:09:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:16 crc kubenswrapper[4923]: I1128 11:09:16.528159 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b483d037-b692-45d5-bb83-02e029649100-metrics-certs\") pod \"network-metrics-daemon-g2kmb\" (UID: \"b483d037-b692-45d5-bb83-02e029649100\") " pod="openshift-multus/network-metrics-daemon-g2kmb" Nov 28 11:09:16 crc kubenswrapper[4923]: E1128 11:09:16.528372 4923 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 11:09:16 crc kubenswrapper[4923]: E1128 11:09:16.528457 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b483d037-b692-45d5-bb83-02e029649100-metrics-certs podName:b483d037-b692-45d5-bb83-02e029649100 nodeName:}" failed. No retries permitted until 2025-11-28 11:09:18.528433126 +0000 UTC m=+37.657117366 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/b483d037-b692-45d5-bb83-02e029649100-metrics-certs") pod "network-metrics-daemon-g2kmb" (UID: "b483d037-b692-45d5-bb83-02e029649100") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 11:09:16 crc kubenswrapper[4923]: I1128 11:09:16.616664 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:16 crc kubenswrapper[4923]: I1128 11:09:16.616713 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:16 crc kubenswrapper[4923]: I1128 11:09:16.616724 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:16 crc kubenswrapper[4923]: I1128 11:09:16.616771 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:16 crc kubenswrapper[4923]: I1128 11:09:16.616786 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:16Z","lastTransitionTime":"2025-11-28T11:09:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:16 crc kubenswrapper[4923]: I1128 11:09:16.720220 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:16 crc kubenswrapper[4923]: I1128 11:09:16.720552 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:16 crc kubenswrapper[4923]: I1128 11:09:16.720705 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:16 crc kubenswrapper[4923]: I1128 11:09:16.720858 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:16 crc kubenswrapper[4923]: I1128 11:09:16.721052 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:16Z","lastTransitionTime":"2025-11-28T11:09:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:16 crc kubenswrapper[4923]: I1128 11:09:16.824100 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:16 crc kubenswrapper[4923]: I1128 11:09:16.824145 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:16 crc kubenswrapper[4923]: I1128 11:09:16.824162 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:16 crc kubenswrapper[4923]: I1128 11:09:16.824188 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:16 crc kubenswrapper[4923]: I1128 11:09:16.824206 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:16Z","lastTransitionTime":"2025-11-28T11:09:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:16 crc kubenswrapper[4923]: I1128 11:09:16.927572 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:16 crc kubenswrapper[4923]: I1128 11:09:16.927626 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:16 crc kubenswrapper[4923]: I1128 11:09:16.927643 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:16 crc kubenswrapper[4923]: I1128 11:09:16.927664 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:16 crc kubenswrapper[4923]: I1128 11:09:16.927682 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:16Z","lastTransitionTime":"2025-11-28T11:09:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:17 crc kubenswrapper[4923]: I1128 11:09:17.030789 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:17 crc kubenswrapper[4923]: I1128 11:09:17.030839 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:17 crc kubenswrapper[4923]: I1128 11:09:17.030856 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:17 crc kubenswrapper[4923]: I1128 11:09:17.030880 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:17 crc kubenswrapper[4923]: I1128 11:09:17.030897 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:17Z","lastTransitionTime":"2025-11-28T11:09:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:17 crc kubenswrapper[4923]: I1128 11:09:17.134905 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:17 crc kubenswrapper[4923]: I1128 11:09:17.134995 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:17 crc kubenswrapper[4923]: I1128 11:09:17.135012 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:17 crc kubenswrapper[4923]: I1128 11:09:17.135036 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:17 crc kubenswrapper[4923]: I1128 11:09:17.135055 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:17Z","lastTransitionTime":"2025-11-28T11:09:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:17 crc kubenswrapper[4923]: I1128 11:09:17.168721 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-g2kmb" Nov 28 11:09:17 crc kubenswrapper[4923]: E1128 11:09:17.168890 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-g2kmb" podUID="b483d037-b692-45d5-bb83-02e029649100" Nov 28 11:09:17 crc kubenswrapper[4923]: I1128 11:09:17.237975 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:17 crc kubenswrapper[4923]: I1128 11:09:17.238034 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:17 crc kubenswrapper[4923]: I1128 11:09:17.238051 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:17 crc kubenswrapper[4923]: I1128 11:09:17.238074 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:17 crc kubenswrapper[4923]: I1128 11:09:17.238092 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:17Z","lastTransitionTime":"2025-11-28T11:09:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:17 crc kubenswrapper[4923]: I1128 11:09:17.340506 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:17 crc kubenswrapper[4923]: I1128 11:09:17.340563 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:17 crc kubenswrapper[4923]: I1128 11:09:17.340587 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:17 crc kubenswrapper[4923]: I1128 11:09:17.340617 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:17 crc kubenswrapper[4923]: I1128 11:09:17.340640 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:17Z","lastTransitionTime":"2025-11-28T11:09:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:17 crc kubenswrapper[4923]: I1128 11:09:17.443722 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:17 crc kubenswrapper[4923]: I1128 11:09:17.444093 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:17 crc kubenswrapper[4923]: I1128 11:09:17.444237 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:17 crc kubenswrapper[4923]: I1128 11:09:17.444382 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:17 crc kubenswrapper[4923]: I1128 11:09:17.444524 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:17Z","lastTransitionTime":"2025-11-28T11:09:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:17 crc kubenswrapper[4923]: I1128 11:09:17.553262 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:17 crc kubenswrapper[4923]: I1128 11:09:17.553298 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:17 crc kubenswrapper[4923]: I1128 11:09:17.553309 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:17 crc kubenswrapper[4923]: I1128 11:09:17.553326 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:17 crc kubenswrapper[4923]: I1128 11:09:17.553338 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:17Z","lastTransitionTime":"2025-11-28T11:09:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:17 crc kubenswrapper[4923]: I1128 11:09:17.656674 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:17 crc kubenswrapper[4923]: I1128 11:09:17.656742 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:17 crc kubenswrapper[4923]: I1128 11:09:17.656763 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:17 crc kubenswrapper[4923]: I1128 11:09:17.656790 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:17 crc kubenswrapper[4923]: I1128 11:09:17.656815 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:17Z","lastTransitionTime":"2025-11-28T11:09:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:17 crc kubenswrapper[4923]: I1128 11:09:17.759883 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:17 crc kubenswrapper[4923]: I1128 11:09:17.759968 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:17 crc kubenswrapper[4923]: I1128 11:09:17.759987 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:17 crc kubenswrapper[4923]: I1128 11:09:17.760011 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:17 crc kubenswrapper[4923]: I1128 11:09:17.760027 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:17Z","lastTransitionTime":"2025-11-28T11:09:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:17 crc kubenswrapper[4923]: I1128 11:09:17.862023 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:17 crc kubenswrapper[4923]: I1128 11:09:17.862056 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:17 crc kubenswrapper[4923]: I1128 11:09:17.862067 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:17 crc kubenswrapper[4923]: I1128 11:09:17.862082 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:17 crc kubenswrapper[4923]: I1128 11:09:17.862093 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:17Z","lastTransitionTime":"2025-11-28T11:09:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:17 crc kubenswrapper[4923]: I1128 11:09:17.964785 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:17 crc kubenswrapper[4923]: I1128 11:09:17.964836 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:17 crc kubenswrapper[4923]: I1128 11:09:17.964859 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:17 crc kubenswrapper[4923]: I1128 11:09:17.964885 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:17 crc kubenswrapper[4923]: I1128 11:09:17.964902 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:17Z","lastTransitionTime":"2025-11-28T11:09:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:18 crc kubenswrapper[4923]: I1128 11:09:18.066608 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:18 crc kubenswrapper[4923]: I1128 11:09:18.066671 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:18 crc kubenswrapper[4923]: I1128 11:09:18.066693 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:18 crc kubenswrapper[4923]: I1128 11:09:18.066724 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:18 crc kubenswrapper[4923]: I1128 11:09:18.066744 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:18Z","lastTransitionTime":"2025-11-28T11:09:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:18 crc kubenswrapper[4923]: I1128 11:09:18.167877 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 11:09:18 crc kubenswrapper[4923]: I1128 11:09:18.167904 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 11:09:18 crc kubenswrapper[4923]: I1128 11:09:18.168054 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 11:09:18 crc kubenswrapper[4923]: E1128 11:09:18.168195 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 11:09:18 crc kubenswrapper[4923]: E1128 11:09:18.168323 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 11:09:18 crc kubenswrapper[4923]: E1128 11:09:18.168444 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 11:09:18 crc kubenswrapper[4923]: I1128 11:09:18.169748 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:18 crc kubenswrapper[4923]: I1128 11:09:18.169806 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:18 crc kubenswrapper[4923]: I1128 11:09:18.169824 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:18 crc kubenswrapper[4923]: I1128 11:09:18.169847 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:18 crc kubenswrapper[4923]: I1128 11:09:18.169869 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:18Z","lastTransitionTime":"2025-11-28T11:09:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:18 crc kubenswrapper[4923]: I1128 11:09:18.273039 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:18 crc kubenswrapper[4923]: I1128 11:09:18.273097 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:18 crc kubenswrapper[4923]: I1128 11:09:18.273114 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:18 crc kubenswrapper[4923]: I1128 11:09:18.273140 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:18 crc kubenswrapper[4923]: I1128 11:09:18.273156 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:18Z","lastTransitionTime":"2025-11-28T11:09:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:18 crc kubenswrapper[4923]: I1128 11:09:18.375996 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:18 crc kubenswrapper[4923]: I1128 11:09:18.376028 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:18 crc kubenswrapper[4923]: I1128 11:09:18.376036 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:18 crc kubenswrapper[4923]: I1128 11:09:18.376047 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:18 crc kubenswrapper[4923]: I1128 11:09:18.376056 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:18Z","lastTransitionTime":"2025-11-28T11:09:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:18 crc kubenswrapper[4923]: I1128 11:09:18.479152 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:18 crc kubenswrapper[4923]: I1128 11:09:18.479203 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:18 crc kubenswrapper[4923]: I1128 11:09:18.479220 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:18 crc kubenswrapper[4923]: I1128 11:09:18.479247 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:18 crc kubenswrapper[4923]: I1128 11:09:18.479264 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:18Z","lastTransitionTime":"2025-11-28T11:09:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:18 crc kubenswrapper[4923]: I1128 11:09:18.564353 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b483d037-b692-45d5-bb83-02e029649100-metrics-certs\") pod \"network-metrics-daemon-g2kmb\" (UID: \"b483d037-b692-45d5-bb83-02e029649100\") " pod="openshift-multus/network-metrics-daemon-g2kmb" Nov 28 11:09:18 crc kubenswrapper[4923]: E1128 11:09:18.564550 4923 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 11:09:18 crc kubenswrapper[4923]: E1128 11:09:18.564661 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b483d037-b692-45d5-bb83-02e029649100-metrics-certs podName:b483d037-b692-45d5-bb83-02e029649100 nodeName:}" failed. No retries permitted until 2025-11-28 11:09:22.564631709 +0000 UTC m=+41.693315959 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/b483d037-b692-45d5-bb83-02e029649100-metrics-certs") pod "network-metrics-daemon-g2kmb" (UID: "b483d037-b692-45d5-bb83-02e029649100") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 11:09:18 crc kubenswrapper[4923]: I1128 11:09:18.581776 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:18 crc kubenswrapper[4923]: I1128 11:09:18.581825 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:18 crc kubenswrapper[4923]: I1128 11:09:18.581844 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:18 crc kubenswrapper[4923]: I1128 11:09:18.581868 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:18 crc kubenswrapper[4923]: I1128 11:09:18.581884 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:18Z","lastTransitionTime":"2025-11-28T11:09:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:18 crc kubenswrapper[4923]: I1128 11:09:18.685344 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:18 crc kubenswrapper[4923]: I1128 11:09:18.685402 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:18 crc kubenswrapper[4923]: I1128 11:09:18.685419 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:18 crc kubenswrapper[4923]: I1128 11:09:18.685443 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:18 crc kubenswrapper[4923]: I1128 11:09:18.685461 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:18Z","lastTransitionTime":"2025-11-28T11:09:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:18 crc kubenswrapper[4923]: I1128 11:09:18.788304 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:18 crc kubenswrapper[4923]: I1128 11:09:18.788355 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:18 crc kubenswrapper[4923]: I1128 11:09:18.788367 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:18 crc kubenswrapper[4923]: I1128 11:09:18.788390 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:18 crc kubenswrapper[4923]: I1128 11:09:18.788402 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:18Z","lastTransitionTime":"2025-11-28T11:09:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:18 crc kubenswrapper[4923]: I1128 11:09:18.891316 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:18 crc kubenswrapper[4923]: I1128 11:09:18.891383 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:18 crc kubenswrapper[4923]: I1128 11:09:18.891434 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:18 crc kubenswrapper[4923]: I1128 11:09:18.891461 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:18 crc kubenswrapper[4923]: I1128 11:09:18.891480 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:18Z","lastTransitionTime":"2025-11-28T11:09:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:18 crc kubenswrapper[4923]: I1128 11:09:18.994587 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:18 crc kubenswrapper[4923]: I1128 11:09:18.994652 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:18 crc kubenswrapper[4923]: I1128 11:09:18.994668 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:18 crc kubenswrapper[4923]: I1128 11:09:18.994691 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:18 crc kubenswrapper[4923]: I1128 11:09:18.994710 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:18Z","lastTransitionTime":"2025-11-28T11:09:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:19 crc kubenswrapper[4923]: I1128 11:09:19.097902 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:19 crc kubenswrapper[4923]: I1128 11:09:19.098004 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:19 crc kubenswrapper[4923]: I1128 11:09:19.098022 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:19 crc kubenswrapper[4923]: I1128 11:09:19.098045 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:19 crc kubenswrapper[4923]: I1128 11:09:19.098063 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:19Z","lastTransitionTime":"2025-11-28T11:09:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:19 crc kubenswrapper[4923]: I1128 11:09:19.168107 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-g2kmb" Nov 28 11:09:19 crc kubenswrapper[4923]: E1128 11:09:19.168302 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-g2kmb" podUID="b483d037-b692-45d5-bb83-02e029649100" Nov 28 11:09:19 crc kubenswrapper[4923]: I1128 11:09:19.201448 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:19 crc kubenswrapper[4923]: I1128 11:09:19.201485 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:19 crc kubenswrapper[4923]: I1128 11:09:19.201493 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:19 crc kubenswrapper[4923]: I1128 11:09:19.201507 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:19 crc kubenswrapper[4923]: I1128 11:09:19.201517 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:19Z","lastTransitionTime":"2025-11-28T11:09:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:19 crc kubenswrapper[4923]: I1128 11:09:19.305055 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:19 crc kubenswrapper[4923]: I1128 11:09:19.305117 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:19 crc kubenswrapper[4923]: I1128 11:09:19.305140 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:19 crc kubenswrapper[4923]: I1128 11:09:19.305170 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:19 crc kubenswrapper[4923]: I1128 11:09:19.305191 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:19Z","lastTransitionTime":"2025-11-28T11:09:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:19 crc kubenswrapper[4923]: I1128 11:09:19.408271 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:19 crc kubenswrapper[4923]: I1128 11:09:19.408402 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:19 crc kubenswrapper[4923]: I1128 11:09:19.408422 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:19 crc kubenswrapper[4923]: I1128 11:09:19.408446 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:19 crc kubenswrapper[4923]: I1128 11:09:19.408464 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:19Z","lastTransitionTime":"2025-11-28T11:09:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:19 crc kubenswrapper[4923]: I1128 11:09:19.511248 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:19 crc kubenswrapper[4923]: I1128 11:09:19.511371 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:19 crc kubenswrapper[4923]: I1128 11:09:19.511393 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:19 crc kubenswrapper[4923]: I1128 11:09:19.511425 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:19 crc kubenswrapper[4923]: I1128 11:09:19.511449 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:19Z","lastTransitionTime":"2025-11-28T11:09:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:19 crc kubenswrapper[4923]: I1128 11:09:19.613864 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:19 crc kubenswrapper[4923]: I1128 11:09:19.613926 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:19 crc kubenswrapper[4923]: I1128 11:09:19.613968 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:19 crc kubenswrapper[4923]: I1128 11:09:19.613993 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:19 crc kubenswrapper[4923]: I1128 11:09:19.614009 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:19Z","lastTransitionTime":"2025-11-28T11:09:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:19 crc kubenswrapper[4923]: I1128 11:09:19.716532 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:19 crc kubenswrapper[4923]: I1128 11:09:19.716584 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:19 crc kubenswrapper[4923]: I1128 11:09:19.716602 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:19 crc kubenswrapper[4923]: I1128 11:09:19.716624 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:19 crc kubenswrapper[4923]: I1128 11:09:19.716642 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:19Z","lastTransitionTime":"2025-11-28T11:09:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:19 crc kubenswrapper[4923]: I1128 11:09:19.820095 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:19 crc kubenswrapper[4923]: I1128 11:09:19.820160 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:19 crc kubenswrapper[4923]: I1128 11:09:19.820186 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:19 crc kubenswrapper[4923]: I1128 11:09:19.820217 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:19 crc kubenswrapper[4923]: I1128 11:09:19.820243 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:19Z","lastTransitionTime":"2025-11-28T11:09:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:19 crc kubenswrapper[4923]: I1128 11:09:19.860328 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:19 crc kubenswrapper[4923]: I1128 11:09:19.860431 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:19 crc kubenswrapper[4923]: I1128 11:09:19.860454 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:19 crc kubenswrapper[4923]: I1128 11:09:19.860480 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:19 crc kubenswrapper[4923]: I1128 11:09:19.860499 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:19Z","lastTransitionTime":"2025-11-28T11:09:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:19 crc kubenswrapper[4923]: E1128 11:09:19.880448 4923 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:19Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:19Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f69ffe27-00d5-45aa-bb63-00075a21e0c7\\\",\\\"systemUUID\\\":\\\"bb6b4e53-d23a-4517-9d50-b05bdc3da8e4\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:19Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:19 crc kubenswrapper[4923]: I1128 11:09:19.885824 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:19 crc kubenswrapper[4923]: I1128 11:09:19.885875 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 11:09:19 crc kubenswrapper[4923]: I1128 11:09:19.885892 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:19 crc kubenswrapper[4923]: I1128 11:09:19.885916 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:19 crc kubenswrapper[4923]: I1128 11:09:19.885958 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:19Z","lastTransitionTime":"2025-11-28T11:09:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:19 crc kubenswrapper[4923]: E1128 11:09:19.906481 4923 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:19Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:19Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f69ffe27-00d5-45aa-bb63-00075a21e0c7\\\",\\\"systemUUID\\\":\\\"bb6b4e53-d23a-4517-9d50-b05bdc3da8e4\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:19Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:19 crc kubenswrapper[4923]: I1128 11:09:19.911731 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:19 crc kubenswrapper[4923]: I1128 11:09:19.911799 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 11:09:19 crc kubenswrapper[4923]: I1128 11:09:19.911816 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:19 crc kubenswrapper[4923]: I1128 11:09:19.911842 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:19 crc kubenswrapper[4923]: I1128 11:09:19.911861 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:19Z","lastTransitionTime":"2025-11-28T11:09:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:19 crc kubenswrapper[4923]: E1128 11:09:19.931165 4923 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:19Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:19Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f69ffe27-00d5-45aa-bb63-00075a21e0c7\\\",\\\"systemUUID\\\":\\\"bb6b4e53-d23a-4517-9d50-b05bdc3da8e4\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:19Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:19 crc kubenswrapper[4923]: I1128 11:09:19.936128 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:19 crc kubenswrapper[4923]: I1128 11:09:19.936316 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
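Every one of these "Error updating node status, will retry" entries fails for the same reason: the serving certificate of the node.network-node-identity webhook at 127.0.0.1:9743 expired on 2025-08-24T17:21:41Z, while the node clock reads 2025-11-28, so the API server rejects the kubelet's status patch before it is ever applied. A quick way to confirm what the x509 error reports is to read the certificate straight off the listener. The sketch below is illustrative only and assumes Python 3 plus the third-party cryptography package on the node, neither of which this log guarantees.

    # Illustrative sketch: fetch the TLS serving certificate from the
    # webhook endpoint named in the errors above and compare its notAfter
    # with the current time. Assumes `pip install cryptography`.
    import socket
    import ssl
    from datetime import datetime, timezone

    from cryptography import x509

    HOST, PORT = "127.0.0.1", 9743  # from the webhook URL in the log

    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    ctx.check_hostname = False       # we only want to inspect the cert,
    ctx.verify_mode = ssl.CERT_NONE  # not to validate it (it is expired)

    with socket.create_connection((HOST, PORT), timeout=5) as sock:
        with ctx.wrap_socket(sock, server_hostname=HOST) as tls:
            der = tls.getpeercert(binary_form=True)

    cert = x509.load_der_x509_certificate(der)
    not_after = cert.not_valid_after.replace(tzinfo=timezone.utc)
    print("notAfter:", not_after.isoformat())
    print("expired:", datetime.now(timezone.utc) > not_after)

If the webhook certificate were rotated (or the node clock corrected), the status patches below would be expected to go through; until then the kubelet keeps retrying, as the following entries show.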
event="NodeHasNoDiskPressure" Nov 28 11:09:19 crc kubenswrapper[4923]: I1128 11:09:19.936453 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:19 crc kubenswrapper[4923]: I1128 11:09:19.936611 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:19 crc kubenswrapper[4923]: I1128 11:09:19.936755 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:19Z","lastTransitionTime":"2025-11-28T11:09:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:19 crc kubenswrapper[4923]: E1128 11:09:19.955805 4923 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:19Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:19Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f69ffe27-00d5-45aa-bb63-00075a21e0c7\\\",\\\"systemUUID\\\":\\\"bb6b4e53-d23a-4517-9d50-b05bdc3da8e4\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:19Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:19 crc kubenswrapper[4923]: I1128 11:09:19.960702 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:19 crc kubenswrapper[4923]: I1128 11:09:19.961020 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 11:09:19 crc kubenswrapper[4923]: I1128 11:09:19.961057 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:19 crc kubenswrapper[4923]: I1128 11:09:19.961087 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:19 crc kubenswrapper[4923]: I1128 11:09:19.961116 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:19Z","lastTransitionTime":"2025-11-28T11:09:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:19 crc kubenswrapper[4923]: E1128 11:09:19.980588 4923 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:19Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:19Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f69ffe27-00d5-45aa-bb63-00075a21e0c7\\\",\\\"systemUUID\\\":\\\"bb6b4e53-d23a-4517-9d50-b05bdc3da8e4\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:19Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:19 crc kubenswrapper[4923]: E1128 11:09:19.980812 4923 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 28 11:09:19 crc kubenswrapper[4923]: I1128 11:09:19.982793 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
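The kubelet_node_status.go:572 entry above marks the end of one sync attempt: in upstream kubelet, tryUpdateNodeStatus is called up to nodeStatusUpdateRetry (5) times per sync loop, and only after every attempt fails does it log "update node status exceeds retry count" and wait for the next sync period. A minimal sketch of that bounded-retry pattern, with illustrative names rather than kubelet's actual code:

    # Minimal sketch of the kubelet's bounded retry around node status
    # updates. NODE_STATUS_UPDATE_RETRY mirrors upstream kubelet's
    # nodeStatusUpdateRetry = 5; the function names are illustrative.
    NODE_STATUS_UPDATE_RETRY = 5

    def update_node_status(try_update_once) -> None:
        """Attempt the status patch a fixed number of times, then give up."""
        for _ in range(NODE_STATUS_UPDATE_RETRY):
            try:
                try_update_once()
                return  # success: stop retrying
            except RuntimeError as err:
                print(f"Error updating node status, will retry: {err}")
        # mirrors the log line above once every attempt has failed
        print("Unable to update node status: update node status exceeds retry count")

    def failing_patch() -> None:
        # stand-in for the PATCH that the expired webhook cert rejects
        raise RuntimeError("x509: certificate has expired or is not yet valid")

    update_node_status(failing_patch)

Giving up here is not fatal: the next sync period starts a fresh set of attempts, which is why the same burst of five failed patches repeats throughout this log.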
event="NodeHasSufficientMemory" Nov 28 11:09:19 crc kubenswrapper[4923]: I1128 11:09:19.983033 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:19 crc kubenswrapper[4923]: I1128 11:09:19.983164 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:19 crc kubenswrapper[4923]: I1128 11:09:19.983339 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:19 crc kubenswrapper[4923]: I1128 11:09:19.983469 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:19Z","lastTransitionTime":"2025-11-28T11:09:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:20 crc kubenswrapper[4923]: I1128 11:09:20.085455 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:20 crc kubenswrapper[4923]: I1128 11:09:20.085505 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:20 crc kubenswrapper[4923]: I1128 11:09:20.085523 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:20 crc kubenswrapper[4923]: I1128 11:09:20.085545 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:20 crc kubenswrapper[4923]: I1128 11:09:20.085563 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:20Z","lastTransitionTime":"2025-11-28T11:09:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:20 crc kubenswrapper[4923]: I1128 11:09:20.167649 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 11:09:20 crc kubenswrapper[4923]: I1128 11:09:20.167792 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 11:09:20 crc kubenswrapper[4923]: I1128 11:09:20.167834 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 11:09:20 crc kubenswrapper[4923]: E1128 11:09:20.168118 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 11:09:20 crc kubenswrapper[4923]: E1128 11:09:20.168870 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 11:09:20 crc kubenswrapper[4923]: E1128 11:09:20.169555 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 11:09:20 crc kubenswrapper[4923]: I1128 11:09:20.188786 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:20 crc kubenswrapper[4923]: I1128 11:09:20.188843 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:20 crc kubenswrapper[4923]: I1128 11:09:20.188859 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:20 crc kubenswrapper[4923]: I1128 11:09:20.188882 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:20 crc kubenswrapper[4923]: I1128 11:09:20.188899 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:20Z","lastTransitionTime":"2025-11-28T11:09:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:20 crc kubenswrapper[4923]: I1128 11:09:20.292260 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:20 crc kubenswrapper[4923]: I1128 11:09:20.292337 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:20 crc kubenswrapper[4923]: I1128 11:09:20.292356 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:20 crc kubenswrapper[4923]: I1128 11:09:20.292381 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:20 crc kubenswrapper[4923]: I1128 11:09:20.292398 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:20Z","lastTransitionTime":"2025-11-28T11:09:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:20 crc kubenswrapper[4923]: I1128 11:09:20.396979 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:20 crc kubenswrapper[4923]: I1128 11:09:20.397049 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:20 crc kubenswrapper[4923]: I1128 11:09:20.397069 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:20 crc kubenswrapper[4923]: I1128 11:09:20.397096 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:20 crc kubenswrapper[4923]: I1128 11:09:20.397114 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:20Z","lastTransitionTime":"2025-11-28T11:09:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:20 crc kubenswrapper[4923]: I1128 11:09:20.500218 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:20 crc kubenswrapper[4923]: I1128 11:09:20.500276 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:20 crc kubenswrapper[4923]: I1128 11:09:20.500299 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:20 crc kubenswrapper[4923]: I1128 11:09:20.500330 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:20 crc kubenswrapper[4923]: I1128 11:09:20.500350 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:20Z","lastTransitionTime":"2025-11-28T11:09:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:20 crc kubenswrapper[4923]: I1128 11:09:20.603477 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:20 crc kubenswrapper[4923]: I1128 11:09:20.603916 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:20 crc kubenswrapper[4923]: I1128 11:09:20.604213 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:20 crc kubenswrapper[4923]: I1128 11:09:20.604436 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:20 crc kubenswrapper[4923]: I1128 11:09:20.604692 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:20Z","lastTransitionTime":"2025-11-28T11:09:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:20 crc kubenswrapper[4923]: I1128 11:09:20.708409 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:20 crc kubenswrapper[4923]: I1128 11:09:20.708461 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:20 crc kubenswrapper[4923]: I1128 11:09:20.708477 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:20 crc kubenswrapper[4923]: I1128 11:09:20.708500 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:20 crc kubenswrapper[4923]: I1128 11:09:20.708516 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:20Z","lastTransitionTime":"2025-11-28T11:09:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:20 crc kubenswrapper[4923]: I1128 11:09:20.812128 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:20 crc kubenswrapper[4923]: I1128 11:09:20.812194 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:20 crc kubenswrapper[4923]: I1128 11:09:20.812217 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:20 crc kubenswrapper[4923]: I1128 11:09:20.812247 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:20 crc kubenswrapper[4923]: I1128 11:09:20.812267 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:20Z","lastTransitionTime":"2025-11-28T11:09:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:20 crc kubenswrapper[4923]: I1128 11:09:20.915252 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:20 crc kubenswrapper[4923]: I1128 11:09:20.915342 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:20 crc kubenswrapper[4923]: I1128 11:09:20.915361 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:20 crc kubenswrapper[4923]: I1128 11:09:20.915383 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:20 crc kubenswrapper[4923]: I1128 11:09:20.915399 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:20Z","lastTransitionTime":"2025-11-28T11:09:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:21 crc kubenswrapper[4923]: I1128 11:09:21.018908 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:21 crc kubenswrapper[4923]: I1128 11:09:21.018980 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:21 crc kubenswrapper[4923]: I1128 11:09:21.018992 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:21 crc kubenswrapper[4923]: I1128 11:09:21.019038 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:21 crc kubenswrapper[4923]: I1128 11:09:21.019050 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:21Z","lastTransitionTime":"2025-11-28T11:09:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:21 crc kubenswrapper[4923]: I1128 11:09:21.122263 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:21 crc kubenswrapper[4923]: I1128 11:09:21.122321 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:21 crc kubenswrapper[4923]: I1128 11:09:21.122338 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:21 crc kubenswrapper[4923]: I1128 11:09:21.122362 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:21 crc kubenswrapper[4923]: I1128 11:09:21.122380 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:21Z","lastTransitionTime":"2025-11-28T11:09:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:21 crc kubenswrapper[4923]: I1128 11:09:21.169062 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-g2kmb" Nov 28 11:09:21 crc kubenswrapper[4923]: E1128 11:09:21.169620 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-g2kmb" podUID="b483d037-b692-45d5-bb83-02e029649100" Nov 28 11:09:21 crc kubenswrapper[4923]: I1128 11:09:21.193644 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdcd87eab93f0216a48bbd6038ca2bc510b7b36f895bf66de15084be62a9a0e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa3a1d3e4297edce49cfd44925fbd1cb0d51752581df9a406042cc1da6f87121\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:21Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:21 crc kubenswrapper[4923]: I1128 11:09:21.214252 4923 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0d288688a32f135820030d0816b0e9567100a4732e99c41c8b7f05374c8251f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:21Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:21 crc kubenswrapper[4923]: I1128 11:09:21.225417 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:21 crc kubenswrapper[4923]: I1128 11:09:21.225464 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:21 crc kubenswrapper[4923]: I1128 11:09:21.225480 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:21 crc kubenswrapper[4923]: I1128 11:09:21.225504 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:21 crc kubenswrapper[4923]: I1128 11:09:21.225520 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:21Z","lastTransitionTime":"2025-11-28T11:09:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:21 crc kubenswrapper[4923]: I1128 11:09:21.234859 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-9gjj9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b5d7899933378350cf0b863d44216aa3d87b7343f144dcab3470ee44370de0a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27143610133e2bc3e2aa453a394a9f65fcdeb97a45221a239dd490029e5a3184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://27143610133e2bc3e2aa453a394a9f65fcdeb97a45221a239dd490029e5a3184\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79f89c182f50622044f3978965cb214c601f6de4cddc96eaa118f532b2864276\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://79f89c182f50622044f3978965cb214c601f6de4cddc96eaa118f532b2864276\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7996a8b1d06ca35a2ee6c89edc2eaa7e45a6084ab54ff0caaa091c763d3cd47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b7996a8b1d06ca35a2ee6c89edc2eaa7e45a6084ab54ff0caaa091c763d3cd47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62d8385e1aa47815f9084d28d70dae899c80019ce59f5725455c594a31c97f22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62d8385e1aa47815f9084d28d70dae899c80019ce59f5725455c594a31c97f22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f6b2e1bc9f8f538d0973d9b1726d2c105d61fcd559df3ab8a2ec77b2d8f44a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f6b2e1bc9f8f538d0973d9b1726d2c105d61fcd559df3ab8a2ec77b2d8f44a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a368daf98912d176b66d5aba37e5e91937fbee8c7bd7ce6658993668c8e1525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9a368daf98912d176b66d5aba37e5e91937fbee8c7bd7ce6658993668c8e1525\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-9gjj9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:21Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:21 crc kubenswrapper[4923]: I1128 11:09:21.249841 4923 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-image-registry/node-ca-9qvkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf32d1c9-4639-48a9-b972-c9ad6daec543\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee259c68571ed9e58d29ab09558dea3cdcc89ebfb898d6f27e896cb0d80665bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnwc6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:03Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9qvkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:21Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:21 crc kubenswrapper[4923]: I1128 11:09:21.271872 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-g2kmb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b483d037-b692-45d5-bb83-02e029649100\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmpxf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmpxf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:14Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-g2kmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:21Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:21 crc kubenswrapper[4923]: I1128 11:09:21.292106 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"faf07f1a-1aa1-4e4a-b93d-739f0a9f1012\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f7b3757e1d1a5295909db644a475e35e9f9826cd7382a5a3eba86b4a76ac04d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f83e92b35264fccdd516d857e5a574a7156f7615b643691b6f8694daa38089b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8841f44f1d4af0e73960ce1c7ac5a4da352f85f6b3637315faa716d853be3277\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc960423fd7ee0a6231020982f5b932a6a2d7d0515d6f6df503d6c5d51b82096\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:21Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:21 crc kubenswrapper[4923]: I1128 11:09:21.312985 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-h5s2m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"84374038-67ce-4dc0-a2c2-6eed9650c604\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://addcc8dd720a66b5089f7fa541a454de2be862cc524d1f8e4c948059ef70e20f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run
/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8z7ts\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-h5s2m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:21Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:21 crc kubenswrapper[4923]: I1128 11:09:21.333479 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:21 crc kubenswrapper[4923]: I1128 11:09:21.333575 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:21 crc kubenswrapper[4923]: I1128 11:09:21.333608 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:21 crc kubenswrapper[4923]: I1128 11:09:21.334163 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:21 crc kubenswrapper[4923]: I1128 11:09:21.334200 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:21Z","lastTransitionTime":"2025-11-28T11:09:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:21 crc kubenswrapper[4923]: I1128 11:09:21.335526 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-8klhg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b1f111d9-e2b2-44b9-9592-bc5d4fef01f0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69bb796e49d5ca00e472f027f1443316695a4e243faff1eec26bc13d67bbc60a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vq594\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f90a5608dca4e71887975960683dda08b1b5e01f598af251663a968bb7fe56c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vq594\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:13Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-8klhg\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:21Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:21 crc kubenswrapper[4923]: I1128 11:09:21.357523 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c83fada-ddb5-4acd-99c4-74d9f42e6250\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eece6b2154126c64202c6cb5a8b2953275ed2dc75e76fef6aaf2c4b82a1979f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28093276aebb4751d979649c4ced86f500308d0d4dde397771c0e1e968250ec8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28ae91e6197ea506c337abdbce14a048856e6bda9b35c5de922904c26bc96a54\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\
\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb7df64556e877b9dd56be5e97103abc8aa8b28a43b4a5389d0f6e2489057cf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc06f87c8ea0744810e2b9cb7ff8bb529fc1b2133ab79d12eb8e6129accd3e18\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"-12-28 11:08:43 +0000 UTC (now=2025-11-28 11:08:59.275700323 +0000 UTC))\\\\\\\"\\\\nI1128 11:08:59.275749 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1128 11:08:59.275786 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1128 11:08:59.275797 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI1128 11:08:59.275809 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1128 11:08:59.275835 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1128 11:08:59.275852 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764328134\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764328133\\\\\\\\\\\\\\\" (2025-11-28 10:08:53 +0000 UTC to 2026-11-28 10:08:53 +0000 UTC (now=2025-11-28 11:08:59.275832266 +0000 UTC))\\\\\\\"\\\\nI1128 11:08:59.275869 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1128 11:08:59.275889 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1128 11:08:59.275902 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1128 11:08:59.275909 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1128 11:08:59.275921 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1128 11:08:59.275909 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2723273528/tls.crt::/tmp/serving-cert-2723273528/tls.key\\\\\\\"\\\\nF1128 11:08:59.278169 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:43Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c6f085f1fd5a1ed6abe0727d6a94c95fb1b97a9f00a0dc157f62f68698c25ba9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:21Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:21 crc kubenswrapper[4923]: I1128 11:09:21.377779 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c1e1dcf5efd54a3e3546460813ddc68dae027e669a19eeef6af7246b385ed21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:21Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:21 crc kubenswrapper[4923]: I1128 11:09:21.395794 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:21Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:21 crc kubenswrapper[4923]: I1128 11:09:21.416283 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:21Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:21 crc kubenswrapper[4923]: I1128 11:09:21.434779 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:21Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:21 crc kubenswrapper[4923]: I1128 11:09:21.438380 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:21 crc kubenswrapper[4923]: I1128 11:09:21.438431 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:21 crc kubenswrapper[4923]: I1128 11:09:21.438448 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:21 crc kubenswrapper[4923]: I1128 11:09:21.438471 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:21 crc kubenswrapper[4923]: I1128 11:09:21.438492 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:21Z","lastTransitionTime":"2025-11-28T11:09:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
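Every "failed calling webhook" entry above bottoms out in the same TLS check: the webhook's serving certificate expired on 2025-08-24T17:21:41Z, long before the node's clock reading of 2025-11-28T11:09:21Z, so Go's verifier rejects the handshake. A minimal stdlib sketch of that validity-window comparison, the one that produces the "certificate has expired or is not yet valid" wording; the PEM path below is hypothetical.

// certcheck.go: illustrative reproduction of the validity-window test
// behind "x509: certificate has expired or is not yet valid".
package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
	"time"
)

func main() {
	// Hypothetical path; point it at any PEM-encoded certificate.
	data, err := os.ReadFile("/tmp/webhook-serving.pem")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	block, _ := pem.Decode(data)
	if block == nil {
		fmt.Fprintln(os.Stderr, "no PEM block found")
		os.Exit(1)
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	// The verifier's rule: invalid if now < NotBefore or now > NotAfter.
	now := time.Now().UTC()
	switch {
	case now.Before(cert.NotBefore):
		fmt.Printf("certificate is not yet valid: current time %s is before %s\n",
			now.Format(time.RFC3339), cert.NotBefore.Format(time.RFC3339))
	case now.After(cert.NotAfter):
		fmt.Printf("certificate has expired: current time %s is after %s\n",
			now.Format(time.RFC3339), cert.NotAfter.Format(time.RFC3339))
	default:
		fmt.Printf("certificate is valid until %s\n", cert.NotAfter.Format(time.RFC3339))
	}
}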
Has your network provider started?"} Nov 28 11:09:21 crc kubenswrapper[4923]: I1128 11:09:21.452108 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"092566f7-fc7d-4897-a1f2-4ecedcd3058e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5e3ad6f76cbc3a3e771dc55c8711f153c18c1c96798a89e0f20b1ff06041129c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nthb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e0494fbf37786a6c8b1524ab2642c29343c3cfef308a6f0988d59f375d732a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nthb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-bwdth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:21Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:21 crc kubenswrapper[4923]: I1128 11:09:21.484569 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ee3c047cb59b98c8394618e6194fc477b983a7039581951378c69698b307ee7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3c01dc5b138b3d245898dd4a01c5e81350afe6fabfe9e0333589cd9439d4017\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kuber
netes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88bb4ac52c4706ca3d80080efb31eff071b89651d1a474b4c0c11ed5559ee7a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7b206747c810fe48a3d4269cdf80dce693f2d075510aabb42ef2c6dbbea97e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7489bfb225a27d96b70124820fb1924580c08b3355ef948335f881d7646a8a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bc7c6e0b076f04ba7810c82578147a9a3af59d3393e8effb111c299583aa6de\\\",\\\"image
\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bbada82e4374cddba43b2570877b1a338c03bcc8b3691cb2cfce9c5e59d8f271\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bbada82e4374cddba43b2570877b1a338c03bcc8b3691cb2cfce9c5e59d8f271\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T11:09:12Z\\\",\\\"message\\\":\\\"tors_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.189:50051:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {d389393c-7ba9-422c-b3f5-06e391d537d2}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1128 11:09:11.808965 6262 obj_retry.go:409] Going to retry *v1.Pod resource setup for 10 objects: [openshift-kube-apiserver/kube-apiserver-crc openshift-multus/multus-additional-cni-plugins-9gjj9 openshift-network-console/networking-console-plugin-85b44fc459-gdk6g openshift-network-operator/network-operator-58b4c7f79c-55gtf openshift-image-registry/node-ca-9qvkm openshift-network-node-identity/network-node-identity-vrzqb openshift-machine-config-operator/machine-config-daemon-bwdth openshift-network-diagnostics/network-check-source-55646444c4-trplf openshift-network-diagnostics/network-check-target-xd92c openshift-network-operator/iptables-alerter-4ln5h]\\\\nI1128 11:09:11.809487 6262 obj_retry.go:418] Waiting for all the *v1.Pod retry setup to complete in iterateRetryResources\\\\nF1128 11:09:11.809540 6262 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to 
create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:10Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-68dth_openshift-ovn-kubernetes(08e03349-56fc-4b2d-93d3-cf2405a4b7ad)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c2e3f2c83ec1b586a9478fb8d23caccab36a0fe08a3f0907a7b0cb2e67af65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recurs
iveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-68dth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:21Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:21 crc kubenswrapper[4923]: I1128 11:09:21.501367 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-766k2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"69fcf39a-3416-4733-a55a-043d5286f8ac\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14683c7234bd497157ffe1097cd1eee097e5dd0a9e53a3e39813bc75890961b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dnr6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-766k2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:21Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:21 crc kubenswrapper[4923]: I1128 11:09:21.541872 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:21 crc kubenswrapper[4923]: I1128 11:09:21.542323 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:21 crc kubenswrapper[4923]: I1128 11:09:21.542656 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:21 crc kubenswrapper[4923]: I1128 11:09:21.542819 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:21 crc kubenswrapper[4923]: I1128 11:09:21.543004 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:21Z","lastTransitionTime":"2025-11-28T11:09:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:21 crc kubenswrapper[4923]: I1128 11:09:21.645715 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:21 crc kubenswrapper[4923]: I1128 11:09:21.645972 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:21 crc kubenswrapper[4923]: I1128 11:09:21.646129 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:21 crc kubenswrapper[4923]: I1128 11:09:21.646263 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:21 crc kubenswrapper[4923]: I1128 11:09:21.646391 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:21Z","lastTransitionTime":"2025-11-28T11:09:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:21 crc kubenswrapper[4923]: I1128 11:09:21.749107 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:21 crc kubenswrapper[4923]: I1128 11:09:21.749154 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:21 crc kubenswrapper[4923]: I1128 11:09:21.749171 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:21 crc kubenswrapper[4923]: I1128 11:09:21.749193 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:21 crc kubenswrapper[4923]: I1128 11:09:21.749209 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:21Z","lastTransitionTime":"2025-11-28T11:09:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
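The recurring "Node became not ready" entries each set the same Ready condition object on the node. A stdlib-only sketch of that condition's shape, with field names mirroring the JSON in the log; the struct here is a stand-in for the real k8s.io/api/core/v1.NodeCondition type.

// nodecondition.go: illustrative copy of the condition emitted by
// setters.go:603 above; real kubelets use the k8s.io/api types.
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

type NodeCondition struct {
	Type               string    `json:"type"`
	Status             string    `json:"status"`
	LastHeartbeatTime  time.Time `json:"lastHeartbeatTime"`
	LastTransitionTime time.Time `json:"lastTransitionTime"`
	Reason             string    `json:"reason"`
	Message            string    `json:"message"`
}

func main() {
	now := time.Now().UTC().Truncate(time.Second)
	cond := NodeCondition{
		Type:               "Ready",
		Status:             "False",
		LastHeartbeatTime:  now,
		LastTransitionTime: now,
		Reason:             "KubeletNotReady",
		Message: "container runtime network not ready: NetworkReady=false " +
			"reason:NetworkPluginNotReady message:Network plugin returns error: " +
			"no CNI configuration file in /etc/kubernetes/cni/net.d/. " +
			"Has your network provider started?",
	}
	out, _ := json.Marshal(cond)
	fmt.Println(string(out)) // matches the condition={...} payload in the log
}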
Has your network provider started?"} Nov 28 11:09:21 crc kubenswrapper[4923]: I1128 11:09:21.852663 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:21 crc kubenswrapper[4923]: I1128 11:09:21.853025 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:21 crc kubenswrapper[4923]: I1128 11:09:21.853171 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:21 crc kubenswrapper[4923]: I1128 11:09:21.853334 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:21 crc kubenswrapper[4923]: I1128 11:09:21.853466 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:21Z","lastTransitionTime":"2025-11-28T11:09:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:21 crc kubenswrapper[4923]: I1128 11:09:21.956715 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:21 crc kubenswrapper[4923]: I1128 11:09:21.956779 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:21 crc kubenswrapper[4923]: I1128 11:09:21.956796 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:21 crc kubenswrapper[4923]: I1128 11:09:21.956822 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:21 crc kubenswrapper[4923]: I1128 11:09:21.956842 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:21Z","lastTransitionTime":"2025-11-28T11:09:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:22 crc kubenswrapper[4923]: I1128 11:09:22.059066 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:22 crc kubenswrapper[4923]: I1128 11:09:22.059140 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:22 crc kubenswrapper[4923]: I1128 11:09:22.059163 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:22 crc kubenswrapper[4923]: I1128 11:09:22.059192 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:22 crc kubenswrapper[4923]: I1128 11:09:22.059214 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:22Z","lastTransitionTime":"2025-11-28T11:09:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:22 crc kubenswrapper[4923]: I1128 11:09:22.161676 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:22 crc kubenswrapper[4923]: I1128 11:09:22.161736 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:22 crc kubenswrapper[4923]: I1128 11:09:22.161753 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:22 crc kubenswrapper[4923]: I1128 11:09:22.161777 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:22 crc kubenswrapper[4923]: I1128 11:09:22.161796 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:22Z","lastTransitionTime":"2025-11-28T11:09:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:22 crc kubenswrapper[4923]: I1128 11:09:22.167924 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 11:09:22 crc kubenswrapper[4923]: I1128 11:09:22.168028 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 11:09:22 crc kubenswrapper[4923]: E1128 11:09:22.168175 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 11:09:22 crc kubenswrapper[4923]: I1128 11:09:22.168246 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 11:09:22 crc kubenswrapper[4923]: E1128 11:09:22.168422 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 11:09:22 crc kubenswrapper[4923]: E1128 11:09:22.168591 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 11:09:22 crc kubenswrapper[4923]: I1128 11:09:22.264929 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:22 crc kubenswrapper[4923]: I1128 11:09:22.265049 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:22 crc kubenswrapper[4923]: I1128 11:09:22.265089 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:22 crc kubenswrapper[4923]: I1128 11:09:22.265114 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:22 crc kubenswrapper[4923]: I1128 11:09:22.265133 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:22Z","lastTransitionTime":"2025-11-28T11:09:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:22 crc kubenswrapper[4923]: I1128 11:09:22.368363 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:22 crc kubenswrapper[4923]: I1128 11:09:22.368420 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:22 crc kubenswrapper[4923]: I1128 11:09:22.368476 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:22 crc kubenswrapper[4923]: I1128 11:09:22.368500 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:22 crc kubenswrapper[4923]: I1128 11:09:22.368594 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:22Z","lastTransitionTime":"2025-11-28T11:09:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
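The bodies of the "failed to patch status" errors above are strategic-merge patches: "$setElementOrder/conditions" pins the final ordering of the conditions list by its merge key (type), while the plain "conditions" array carries only the entries being changed. A stdlib sketch that decodes an abridged copy of one such patch and separates the two; the embedded JSON is trimmed from the log, not a complete patch.

// patchshape.go: illustrative decoding of a strategic-merge status patch.
package main

import (
	"encoding/json"
	"fmt"
)

const patch = `{
  "status": {
    "$setElementOrder/conditions": [
      {"type": "PodReadyToStartContainers"},
      {"type": "Initialized"},
      {"type": "Ready"},
      {"type": "ContainersReady"},
      {"type": "PodScheduled"}
    ],
    "conditions": [
      {"lastTransitionTime": "2025-11-28T11:09:00Z", "type": "PodReadyToStartContainers"},
      {"lastTransitionTime": "2025-11-28T11:09:00Z", "status": "True", "type": "Ready"}
    ]
  }
}`

func main() {
	var doc struct {
		Status map[string]json.RawMessage `json:"status"`
	}
	if err := json.Unmarshal([]byte(patch), &doc); err != nil {
		panic(err)
	}
	var order, changed []map[string]any
	json.Unmarshal(doc.Status["$setElementOrder/conditions"], &order)
	json.Unmarshal(doc.Status["conditions"], &changed)
	fmt.Printf("final ordering of the %d conditions, by merge key \"type\":\n", len(order))
	for _, c := range order {
		fmt.Printf("  %v\n", c["type"])
	}
	fmt.Printf("entries this patch actually updates: %d\n", len(changed))
	for _, c := range changed {
		fmt.Printf("  %v: %v\n", c["type"], c)
	}
}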
Has your network provider started?"} Nov 28 11:09:22 crc kubenswrapper[4923]: I1128 11:09:22.471455 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:22 crc kubenswrapper[4923]: I1128 11:09:22.471504 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:22 crc kubenswrapper[4923]: I1128 11:09:22.471520 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:22 crc kubenswrapper[4923]: I1128 11:09:22.471543 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:22 crc kubenswrapper[4923]: I1128 11:09:22.471564 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:22Z","lastTransitionTime":"2025-11-28T11:09:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:22 crc kubenswrapper[4923]: I1128 11:09:22.574819 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:22 crc kubenswrapper[4923]: I1128 11:09:22.574879 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:22 crc kubenswrapper[4923]: I1128 11:09:22.574899 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:22 crc kubenswrapper[4923]: I1128 11:09:22.574924 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:22 crc kubenswrapper[4923]: I1128 11:09:22.574968 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:22Z","lastTransitionTime":"2025-11-28T11:09:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:22 crc kubenswrapper[4923]: I1128 11:09:22.614021 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b483d037-b692-45d5-bb83-02e029649100-metrics-certs\") pod \"network-metrics-daemon-g2kmb\" (UID: \"b483d037-b692-45d5-bb83-02e029649100\") " pod="openshift-multus/network-metrics-daemon-g2kmb" Nov 28 11:09:22 crc kubenswrapper[4923]: E1128 11:09:22.621457 4923 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 11:09:22 crc kubenswrapper[4923]: E1128 11:09:22.621747 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b483d037-b692-45d5-bb83-02e029649100-metrics-certs podName:b483d037-b692-45d5-bb83-02e029649100 nodeName:}" failed. No retries permitted until 2025-11-28 11:09:30.621639372 +0000 UTC m=+49.750323622 (durationBeforeRetry 8s). 
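"No retries permitted until ... (durationBeforeRetry 8s)" is the volume manager's exponential backoff: each consecutive MountVolume.SetUp failure roughly doubles the wait before the next attempt. A sketch of that schedule under assumed constants (500ms initial delay, doubling, capped); under those assumptions the observed 8s would correspond to the fifth consecutive failure. Kubelet's actual constants may differ by version.

// backoff.go: illustrative doubling schedule behind durationBeforeRetry.
package main

import (
	"fmt"
	"time"
)

func main() {
	const (
		initialDelay = 500 * time.Millisecond        // assumed first delay
		maxDelay     = 2*time.Minute + 2*time.Second // assumed cap
	)
	delay := initialDelay
	for failure := 1; failure <= 10; failure++ {
		fmt.Printf("failure %2d -> durationBeforeRetry %v\n", failure, delay)
		delay *= 2 // the delay doubles after every failed attempt
		if delay > maxDelay {
			delay = maxDelay
		}
	}
}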
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/b483d037-b692-45d5-bb83-02e029649100-metrics-certs") pod "network-metrics-daemon-g2kmb" (UID: "b483d037-b692-45d5-bb83-02e029649100") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 11:09:22 crc kubenswrapper[4923]: I1128 11:09:22.678481 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:22 crc kubenswrapper[4923]: I1128 11:09:22.678550 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:22 crc kubenswrapper[4923]: I1128 11:09:22.678569 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:22 crc kubenswrapper[4923]: I1128 11:09:22.678597 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:22 crc kubenswrapper[4923]: I1128 11:09:22.678615 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:22Z","lastTransitionTime":"2025-11-28T11:09:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:22 crc kubenswrapper[4923]: I1128 11:09:22.781431 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:22 crc kubenswrapper[4923]: I1128 11:09:22.781484 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:22 crc kubenswrapper[4923]: I1128 11:09:22.781501 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:22 crc kubenswrapper[4923]: I1128 11:09:22.781525 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:22 crc kubenswrapper[4923]: I1128 11:09:22.781542 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:22Z","lastTransitionTime":"2025-11-28T11:09:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:22 crc kubenswrapper[4923]: I1128 11:09:22.884682 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:22 crc kubenswrapper[4923]: I1128 11:09:22.884740 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:22 crc kubenswrapper[4923]: I1128 11:09:22.884757 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:22 crc kubenswrapper[4923]: I1128 11:09:22.884780 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:22 crc kubenswrapper[4923]: I1128 11:09:22.884796 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:22Z","lastTransitionTime":"2025-11-28T11:09:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:22 crc kubenswrapper[4923]: I1128 11:09:22.987540 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:22 crc kubenswrapper[4923]: I1128 11:09:22.987600 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:22 crc kubenswrapper[4923]: I1128 11:09:22.987618 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:22 crc kubenswrapper[4923]: I1128 11:09:22.987643 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:22 crc kubenswrapper[4923]: I1128 11:09:22.987660 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:22Z","lastTransitionTime":"2025-11-28T11:09:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:23 crc kubenswrapper[4923]: I1128 11:09:23.091474 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:23 crc kubenswrapper[4923]: I1128 11:09:23.091545 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:23 crc kubenswrapper[4923]: I1128 11:09:23.091564 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:23 crc kubenswrapper[4923]: I1128 11:09:23.091591 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:23 crc kubenswrapper[4923]: I1128 11:09:23.091609 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:23Z","lastTransitionTime":"2025-11-28T11:09:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:23 crc kubenswrapper[4923]: I1128 11:09:23.168237 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-g2kmb" Nov 28 11:09:23 crc kubenswrapper[4923]: E1128 11:09:23.168484 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-g2kmb" podUID="b483d037-b692-45d5-bb83-02e029649100" Nov 28 11:09:23 crc kubenswrapper[4923]: I1128 11:09:23.194375 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:23 crc kubenswrapper[4923]: I1128 11:09:23.194442 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:23 crc kubenswrapper[4923]: I1128 11:09:23.194465 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:23 crc kubenswrapper[4923]: I1128 11:09:23.194493 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:23 crc kubenswrapper[4923]: I1128 11:09:23.194514 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:23Z","lastTransitionTime":"2025-11-28T11:09:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:23 crc kubenswrapper[4923]: I1128 11:09:23.297525 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:23 crc kubenswrapper[4923]: I1128 11:09:23.297801 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:23 crc kubenswrapper[4923]: I1128 11:09:23.298045 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:23 crc kubenswrapper[4923]: I1128 11:09:23.298254 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:23 crc kubenswrapper[4923]: I1128 11:09:23.298415 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:23Z","lastTransitionTime":"2025-11-28T11:09:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
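The "Error syncing pod, skipping ... network is not ready" entries show sandbox creation being gated on runtime network readiness: a pod that needs pod networking is refused a new sandbox while the runtime reports NetworkReady=false, whereas host-network pods are not blocked this way, which is why ovnkube-node itself keeps running. An illustrative guard, not kubelet source:

// networkgate.go: why these pods are skipped while the CNI config is absent.
package main

import "fmt"

type runtimeStatus struct {
	NetworkReady bool
	Reason       string
	Message      string
}

// canCreateSandbox refuses pod-network pods while the runtime network is down.
func canCreateSandbox(hostNetwork bool, st runtimeStatus) error {
	if hostNetwork || st.NetworkReady {
		return nil
	}
	return fmt.Errorf("network is not ready: container runtime network not ready: NetworkReady=false reason:%s message:%s",
		st.Reason, st.Message)
}

func main() {
	st := runtimeStatus{
		NetworkReady: false,
		Reason:       "NetworkPluginNotReady",
		Message:      "Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?",
	}
	pods := []struct {
		name        string
		hostNetwork bool
	}{
		{"openshift-multus/network-metrics-daemon-g2kmb", false}, // skipped in the log
		{"openshift-ovn-kubernetes/ovnkube-node-68dth", true},    // host-network, so not gated
	}
	for _, p := range pods {
		if err := canCreateSandbox(p.hostNetwork, st); err != nil {
			fmt.Printf("%s: skipping sync: %v\n", p.name, err)
			continue
		}
		fmt.Printf("%s: sandbox creation may proceed\n", p.name)
	}
}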
Nov 28 11:09:23 crc kubenswrapper[4923]: I1128 11:09:23.401302 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 11:09:23 crc kubenswrapper[4923]: I1128 11:09:23.401340 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 11:09:23 crc kubenswrapper[4923]: I1128 11:09:23.401357 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 11:09:23 crc kubenswrapper[4923]: I1128 11:09:23.401376 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 11:09:23 crc kubenswrapper[4923]: I1128 11:09:23.401392 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:23Z","lastTransitionTime":"2025-11-28T11:09:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 11:09:23 crc kubenswrapper[4923]: I1128 11:09:23.504836 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 11:09:23 crc kubenswrapper[4923]: I1128 11:09:23.505288 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 11:09:23 crc kubenswrapper[4923]: I1128 11:09:23.505466 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 11:09:23 crc kubenswrapper[4923]: I1128 11:09:23.505607 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 11:09:23 crc kubenswrapper[4923]: I1128 11:09:23.505749 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:23Z","lastTransitionTime":"2025-11-28T11:09:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 11:09:23 crc kubenswrapper[4923]: I1128 11:09:23.609181 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 11:09:23 crc kubenswrapper[4923]: I1128 11:09:23.609246 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 11:09:23 crc kubenswrapper[4923]: I1128 11:09:23.609263 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 11:09:23 crc kubenswrapper[4923]: I1128 11:09:23.609289 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 11:09:23 crc kubenswrapper[4923]: I1128 11:09:23.609307 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:23Z","lastTransitionTime":"2025-11-28T11:09:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 11:09:23 crc kubenswrapper[4923]: I1128 11:09:23.712687 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 11:09:23 crc kubenswrapper[4923]: I1128 11:09:23.713121 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 11:09:23 crc kubenswrapper[4923]: I1128 11:09:23.713301 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 11:09:23 crc kubenswrapper[4923]: I1128 11:09:23.713439 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 11:09:23 crc kubenswrapper[4923]: I1128 11:09:23.713583 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:23Z","lastTransitionTime":"2025-11-28T11:09:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 11:09:23 crc kubenswrapper[4923]: I1128 11:09:23.817121 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 11:09:23 crc kubenswrapper[4923]: I1128 11:09:23.818026 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 11:09:23 crc kubenswrapper[4923]: I1128 11:09:23.818056 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 11:09:23 crc kubenswrapper[4923]: I1128 11:09:23.818081 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 11:09:23 crc kubenswrapper[4923]: I1128 11:09:23.818098 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:23Z","lastTransitionTime":"2025-11-28T11:09:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 11:09:23 crc kubenswrapper[4923]: I1128 11:09:23.920923 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 11:09:23 crc kubenswrapper[4923]: I1128 11:09:23.921002 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 11:09:23 crc kubenswrapper[4923]: I1128 11:09:23.921021 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 11:09:23 crc kubenswrapper[4923]: I1128 11:09:23.921043 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 11:09:23 crc kubenswrapper[4923]: I1128 11:09:23.921059 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:23Z","lastTransitionTime":"2025-11-28T11:09:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 11:09:24 crc kubenswrapper[4923]: I1128 11:09:24.023727 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 11:09:24 crc kubenswrapper[4923]: I1128 11:09:24.023796 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 11:09:24 crc kubenswrapper[4923]: I1128 11:09:24.023815 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 11:09:24 crc kubenswrapper[4923]: I1128 11:09:24.023840 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 11:09:24 crc kubenswrapper[4923]: I1128 11:09:24.023862 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:24Z","lastTransitionTime":"2025-11-28T11:09:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 11:09:24 crc kubenswrapper[4923]: I1128 11:09:24.127195 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 11:09:24 crc kubenswrapper[4923]: I1128 11:09:24.127242 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 11:09:24 crc kubenswrapper[4923]: I1128 11:09:24.127258 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 11:09:24 crc kubenswrapper[4923]: I1128 11:09:24.127283 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 11:09:24 crc kubenswrapper[4923]: I1128 11:09:24.127301 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:24Z","lastTransitionTime":"2025-11-28T11:09:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 11:09:24 crc kubenswrapper[4923]: I1128 11:09:24.168018 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 11:09:24 crc kubenswrapper[4923]: I1128 11:09:24.168084 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 11:09:24 crc kubenswrapper[4923]: I1128 11:09:24.168018 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 11:09:24 crc kubenswrapper[4923]: E1128 11:09:24.168207 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 11:09:24 crc kubenswrapper[4923]: E1128 11:09:24.168361 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 11:09:24 crc kubenswrapper[4923]: E1128 11:09:24.168499 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 11:09:24 crc kubenswrapper[4923]: I1128 11:09:24.230777 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:24 crc kubenswrapper[4923]: I1128 11:09:24.230876 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:24 crc kubenswrapper[4923]: I1128 11:09:24.230920 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:24 crc kubenswrapper[4923]: I1128 11:09:24.230980 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:24 crc kubenswrapper[4923]: I1128 11:09:24.231000 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:24Z","lastTransitionTime":"2025-11-28T11:09:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:24 crc kubenswrapper[4923]: I1128 11:09:24.333857 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:24 crc kubenswrapper[4923]: I1128 11:09:24.333988 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:24 crc kubenswrapper[4923]: I1128 11:09:24.334009 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:24 crc kubenswrapper[4923]: I1128 11:09:24.334031 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:24 crc kubenswrapper[4923]: I1128 11:09:24.334076 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:24Z","lastTransitionTime":"2025-11-28T11:09:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 28 11:09:24 crc kubenswrapper[4923]: I1128 11:09:24.436666 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 11:09:24 crc kubenswrapper[4923]: I1128 11:09:24.436726 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 11:09:24 crc kubenswrapper[4923]: I1128 11:09:24.436742 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 11:09:24 crc kubenswrapper[4923]: I1128 11:09:24.436764 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 11:09:24 crc kubenswrapper[4923]: I1128 11:09:24.436783 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:24Z","lastTransitionTime":"2025-11-28T11:09:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 11:09:24 crc kubenswrapper[4923]: I1128 11:09:24.540233 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 11:09:24 crc kubenswrapper[4923]: I1128 11:09:24.540326 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 11:09:24 crc kubenswrapper[4923]: I1128 11:09:24.540344 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 11:09:24 crc kubenswrapper[4923]: I1128 11:09:24.540417 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 11:09:24 crc kubenswrapper[4923]: I1128 11:09:24.540434 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:24Z","lastTransitionTime":"2025-11-28T11:09:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 11:09:24 crc kubenswrapper[4923]: I1128 11:09:24.643643 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 11:09:24 crc kubenswrapper[4923]: I1128 11:09:24.643695 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 11:09:24 crc kubenswrapper[4923]: I1128 11:09:24.643718 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 11:09:24 crc kubenswrapper[4923]: I1128 11:09:24.643747 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 11:09:24 crc kubenswrapper[4923]: I1128 11:09:24.643768 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:24Z","lastTransitionTime":"2025-11-28T11:09:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 11:09:24 crc kubenswrapper[4923]: I1128 11:09:24.745804 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 11:09:24 crc kubenswrapper[4923]: I1128 11:09:24.745868 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 11:09:24 crc kubenswrapper[4923]: I1128 11:09:24.745887 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 11:09:24 crc kubenswrapper[4923]: I1128 11:09:24.745911 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 11:09:24 crc kubenswrapper[4923]: I1128 11:09:24.745928 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:24Z","lastTransitionTime":"2025-11-28T11:09:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 11:09:24 crc kubenswrapper[4923]: I1128 11:09:24.848995 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 11:09:24 crc kubenswrapper[4923]: I1128 11:09:24.849083 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 11:09:24 crc kubenswrapper[4923]: I1128 11:09:24.849108 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 11:09:24 crc kubenswrapper[4923]: I1128 11:09:24.849137 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 11:09:24 crc kubenswrapper[4923]: I1128 11:09:24.849159 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:24Z","lastTransitionTime":"2025-11-28T11:09:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 11:09:24 crc kubenswrapper[4923]: I1128 11:09:24.952182 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 11:09:24 crc kubenswrapper[4923]: I1128 11:09:24.952256 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 11:09:24 crc kubenswrapper[4923]: I1128 11:09:24.952279 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 11:09:24 crc kubenswrapper[4923]: I1128 11:09:24.952311 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 11:09:24 crc kubenswrapper[4923]: I1128 11:09:24.952334 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:24Z","lastTransitionTime":"2025-11-28T11:09:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 11:09:25 crc kubenswrapper[4923]: I1128 11:09:25.055461 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 11:09:25 crc kubenswrapper[4923]: I1128 11:09:25.055533 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 11:09:25 crc kubenswrapper[4923]: I1128 11:09:25.055558 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 11:09:25 crc kubenswrapper[4923]: I1128 11:09:25.055588 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 11:09:25 crc kubenswrapper[4923]: I1128 11:09:25.055609 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:25Z","lastTransitionTime":"2025-11-28T11:09:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 11:09:25 crc kubenswrapper[4923]: I1128 11:09:25.158230 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 11:09:25 crc kubenswrapper[4923]: I1128 11:09:25.158340 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 11:09:25 crc kubenswrapper[4923]: I1128 11:09:25.158367 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 11:09:25 crc kubenswrapper[4923]: I1128 11:09:25.158402 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 11:09:25 crc kubenswrapper[4923]: I1128 11:09:25.158432 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:25Z","lastTransitionTime":"2025-11-28T11:09:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 11:09:25 crc kubenswrapper[4923]: I1128 11:09:25.168747 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-g2kmb"
Nov 28 11:09:25 crc kubenswrapper[4923]: E1128 11:09:25.168990 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-g2kmb" podUID="b483d037-b692-45d5-bb83-02e029649100"
pod="openshift-multus/network-metrics-daemon-g2kmb" podUID="b483d037-b692-45d5-bb83-02e029649100" Nov 28 11:09:25 crc kubenswrapper[4923]: I1128 11:09:25.262165 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:25 crc kubenswrapper[4923]: I1128 11:09:25.262237 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:25 crc kubenswrapper[4923]: I1128 11:09:25.262293 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:25 crc kubenswrapper[4923]: I1128 11:09:25.262322 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:25 crc kubenswrapper[4923]: I1128 11:09:25.262341 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:25Z","lastTransitionTime":"2025-11-28T11:09:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:25 crc kubenswrapper[4923]: I1128 11:09:25.364925 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:25 crc kubenswrapper[4923]: I1128 11:09:25.365055 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:25 crc kubenswrapper[4923]: I1128 11:09:25.365077 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:25 crc kubenswrapper[4923]: I1128 11:09:25.365109 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:25 crc kubenswrapper[4923]: I1128 11:09:25.365131 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:25Z","lastTransitionTime":"2025-11-28T11:09:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 28 11:09:25 crc kubenswrapper[4923]: I1128 11:09:25.467969 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 11:09:25 crc kubenswrapper[4923]: I1128 11:09:25.468032 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 11:09:25 crc kubenswrapper[4923]: I1128 11:09:25.468045 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 11:09:25 crc kubenswrapper[4923]: I1128 11:09:25.468060 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 11:09:25 crc kubenswrapper[4923]: I1128 11:09:25.468071 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:25Z","lastTransitionTime":"2025-11-28T11:09:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 11:09:25 crc kubenswrapper[4923]: I1128 11:09:25.570745 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 11:09:25 crc kubenswrapper[4923]: I1128 11:09:25.570806 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 11:09:25 crc kubenswrapper[4923]: I1128 11:09:25.570823 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 11:09:25 crc kubenswrapper[4923]: I1128 11:09:25.570846 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 11:09:25 crc kubenswrapper[4923]: I1128 11:09:25.570866 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:25Z","lastTransitionTime":"2025-11-28T11:09:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 11:09:25 crc kubenswrapper[4923]: I1128 11:09:25.673821 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 11:09:25 crc kubenswrapper[4923]: I1128 11:09:25.673882 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 11:09:25 crc kubenswrapper[4923]: I1128 11:09:25.673899 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 11:09:25 crc kubenswrapper[4923]: I1128 11:09:25.673924 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 11:09:25 crc kubenswrapper[4923]: I1128 11:09:25.673970 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:25Z","lastTransitionTime":"2025-11-28T11:09:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 11:09:25 crc kubenswrapper[4923]: I1128 11:09:25.777035 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 11:09:25 crc kubenswrapper[4923]: I1128 11:09:25.777174 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 11:09:25 crc kubenswrapper[4923]: I1128 11:09:25.777196 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 11:09:25 crc kubenswrapper[4923]: I1128 11:09:25.777223 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 11:09:25 crc kubenswrapper[4923]: I1128 11:09:25.777286 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:25Z","lastTransitionTime":"2025-11-28T11:09:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 11:09:25 crc kubenswrapper[4923]: I1128 11:09:25.880678 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 11:09:25 crc kubenswrapper[4923]: I1128 11:09:25.880743 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 11:09:25 crc kubenswrapper[4923]: I1128 11:09:25.880760 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 11:09:25 crc kubenswrapper[4923]: I1128 11:09:25.880788 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 11:09:25 crc kubenswrapper[4923]: I1128 11:09:25.880809 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:25Z","lastTransitionTime":"2025-11-28T11:09:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 11:09:25 crc kubenswrapper[4923]: I1128 11:09:25.983987 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 11:09:25 crc kubenswrapper[4923]: I1128 11:09:25.984071 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 11:09:25 crc kubenswrapper[4923]: I1128 11:09:25.984112 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 11:09:25 crc kubenswrapper[4923]: I1128 11:09:25.984147 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 11:09:25 crc kubenswrapper[4923]: I1128 11:09:25.984171 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:25Z","lastTransitionTime":"2025-11-28T11:09:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
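[Editor's sketch] Each "Node became not ready" line above carries its payload as a JSON object after "condition=". A small, hypothetical Go helper for pulling that object out of such a line is sketched below; the struct fields mirror only the keys visible in the log (type, status, reason, message, the two timestamps), and parseCondition is an illustrative name, not a kubelet API.

package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

type nodeCondition struct {
	Type               string `json:"type"`
	Status             string `json:"status"`
	LastHeartbeatTime  string `json:"lastHeartbeatTime"`
	LastTransitionTime string `json:"lastTransitionTime"`
	Reason             string `json:"reason"`
	Message            string `json:"message"`
}

// parseCondition extracts and decodes the JSON object after "condition=",
// which in these log lines runs to the end of the line.
func parseCondition(line string) (*nodeCondition, error) {
	_, payload, found := strings.Cut(line, "condition=")
	if !found {
		return nil, fmt.Errorf("no condition= payload in line")
	}
	var c nodeCondition
	if err := json.Unmarshal([]byte(payload), &c); err != nil {
		return nil, err
	}
	return &c, nil
}

func main() {
	line := `setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","reason":"KubeletNotReady","message":"container runtime network not ready"}`
	c, err := parseCondition(line)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s=%s (%s)\n", c.Type, c.Status, c.Reason)
}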
Nov 28 11:09:26 crc kubenswrapper[4923]: I1128 11:09:26.086746 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 11:09:26 crc kubenswrapper[4923]: I1128 11:09:26.086821 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 11:09:26 crc kubenswrapper[4923]: I1128 11:09:26.086843 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 11:09:26 crc kubenswrapper[4923]: I1128 11:09:26.086868 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 11:09:26 crc kubenswrapper[4923]: I1128 11:09:26.086885 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:26Z","lastTransitionTime":"2025-11-28T11:09:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 11:09:26 crc kubenswrapper[4923]: I1128 11:09:26.167888 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 11:09:26 crc kubenswrapper[4923]: I1128 11:09:26.167920 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 11:09:26 crc kubenswrapper[4923]: I1128 11:09:26.168002 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 11:09:26 crc kubenswrapper[4923]: E1128 11:09:26.168371 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 11:09:26 crc kubenswrapper[4923]: E1128 11:09:26.168554 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 11:09:26 crc kubenswrapper[4923]: E1128 11:09:26.168746 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 11:09:26 crc kubenswrapper[4923]: I1128 11:09:26.189785 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:26 crc kubenswrapper[4923]: I1128 11:09:26.189876 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:26 crc kubenswrapper[4923]: I1128 11:09:26.189898 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:26 crc kubenswrapper[4923]: I1128 11:09:26.189920 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:26 crc kubenswrapper[4923]: I1128 11:09:26.189996 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:26Z","lastTransitionTime":"2025-11-28T11:09:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:26 crc kubenswrapper[4923]: I1128 11:09:26.293157 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:26 crc kubenswrapper[4923]: I1128 11:09:26.293255 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:26 crc kubenswrapper[4923]: I1128 11:09:26.293273 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:26 crc kubenswrapper[4923]: I1128 11:09:26.293329 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:26 crc kubenswrapper[4923]: I1128 11:09:26.293355 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:26Z","lastTransitionTime":"2025-11-28T11:09:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 28 11:09:26 crc kubenswrapper[4923]: I1128 11:09:26.396185 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 11:09:26 crc kubenswrapper[4923]: I1128 11:09:26.396234 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 11:09:26 crc kubenswrapper[4923]: I1128 11:09:26.396252 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 11:09:26 crc kubenswrapper[4923]: I1128 11:09:26.396278 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 11:09:26 crc kubenswrapper[4923]: I1128 11:09:26.396297 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:26Z","lastTransitionTime":"2025-11-28T11:09:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 11:09:26 crc kubenswrapper[4923]: I1128 11:09:26.499566 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 11:09:26 crc kubenswrapper[4923]: I1128 11:09:26.499628 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 11:09:26 crc kubenswrapper[4923]: I1128 11:09:26.499650 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 11:09:26 crc kubenswrapper[4923]: I1128 11:09:26.499680 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 11:09:26 crc kubenswrapper[4923]: I1128 11:09:26.499701 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:26Z","lastTransitionTime":"2025-11-28T11:09:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 11:09:26 crc kubenswrapper[4923]: I1128 11:09:26.602034 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 11:09:26 crc kubenswrapper[4923]: I1128 11:09:26.602071 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 11:09:26 crc kubenswrapper[4923]: I1128 11:09:26.602083 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 11:09:26 crc kubenswrapper[4923]: I1128 11:09:26.602100 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 11:09:26 crc kubenswrapper[4923]: I1128 11:09:26.602114 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:26Z","lastTransitionTime":"2025-11-28T11:09:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 11:09:26 crc kubenswrapper[4923]: I1128 11:09:26.705314 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 11:09:26 crc kubenswrapper[4923]: I1128 11:09:26.705376 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 11:09:26 crc kubenswrapper[4923]: I1128 11:09:26.705395 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 11:09:26 crc kubenswrapper[4923]: I1128 11:09:26.705421 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 11:09:26 crc kubenswrapper[4923]: I1128 11:09:26.705441 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:26Z","lastTransitionTime":"2025-11-28T11:09:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 11:09:26 crc kubenswrapper[4923]: I1128 11:09:26.808581 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 11:09:26 crc kubenswrapper[4923]: I1128 11:09:26.808633 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 11:09:26 crc kubenswrapper[4923]: I1128 11:09:26.808651 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 11:09:26 crc kubenswrapper[4923]: I1128 11:09:26.808674 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 11:09:26 crc kubenswrapper[4923]: I1128 11:09:26.808691 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:26Z","lastTransitionTime":"2025-11-28T11:09:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 11:09:26 crc kubenswrapper[4923]: I1128 11:09:26.911437 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 11:09:26 crc kubenswrapper[4923]: I1128 11:09:26.911487 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 11:09:26 crc kubenswrapper[4923]: I1128 11:09:26.911504 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 11:09:26 crc kubenswrapper[4923]: I1128 11:09:26.911528 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 11:09:26 crc kubenswrapper[4923]: I1128 11:09:26.911544 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:26Z","lastTransitionTime":"2025-11-28T11:09:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 11:09:27 crc kubenswrapper[4923]: I1128 11:09:27.014691 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 11:09:27 crc kubenswrapper[4923]: I1128 11:09:27.014751 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 11:09:27 crc kubenswrapper[4923]: I1128 11:09:27.014772 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 11:09:27 crc kubenswrapper[4923]: I1128 11:09:27.014806 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 11:09:27 crc kubenswrapper[4923]: I1128 11:09:27.014827 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:27Z","lastTransitionTime":"2025-11-28T11:09:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 11:09:27 crc kubenswrapper[4923]: I1128 11:09:27.117472 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 11:09:27 crc kubenswrapper[4923]: I1128 11:09:27.117519 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 11:09:27 crc kubenswrapper[4923]: I1128 11:09:27.117534 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 11:09:27 crc kubenswrapper[4923]: I1128 11:09:27.117557 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 11:09:27 crc kubenswrapper[4923]: I1128 11:09:27.117574 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:27Z","lastTransitionTime":"2025-11-28T11:09:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 11:09:27 crc kubenswrapper[4923]: I1128 11:09:27.168384 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-g2kmb"
Nov 28 11:09:27 crc kubenswrapper[4923]: E1128 11:09:27.168551 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-g2kmb" podUID="b483d037-b692-45d5-bb83-02e029649100"
pod="openshift-multus/network-metrics-daemon-g2kmb" podUID="b483d037-b692-45d5-bb83-02e029649100" Nov 28 11:09:27 crc kubenswrapper[4923]: I1128 11:09:27.221033 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:27 crc kubenswrapper[4923]: I1128 11:09:27.221093 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:27 crc kubenswrapper[4923]: I1128 11:09:27.221110 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:27 crc kubenswrapper[4923]: I1128 11:09:27.221139 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:27 crc kubenswrapper[4923]: I1128 11:09:27.221157 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:27Z","lastTransitionTime":"2025-11-28T11:09:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:27 crc kubenswrapper[4923]: I1128 11:09:27.323758 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:27 crc kubenswrapper[4923]: I1128 11:09:27.323824 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:27 crc kubenswrapper[4923]: I1128 11:09:27.323840 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:27 crc kubenswrapper[4923]: I1128 11:09:27.323866 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:27 crc kubenswrapper[4923]: I1128 11:09:27.323885 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:27Z","lastTransitionTime":"2025-11-28T11:09:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 28 11:09:27 crc kubenswrapper[4923]: I1128 11:09:27.346727 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Nov 28 11:09:27 crc kubenswrapper[4923]: I1128 11:09:27.360190 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"]
Nov 28 11:09:27 crc kubenswrapper[4923]: I1128 11:09:27.372035 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:27Z is after 2025-08-24T17:21:41Z"
Nov 28 11:09:27 crc kubenswrapper[4923]: I1128 11:09:27.393136 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:27Z is after 2025-08-24T17:21:41Z"
Nov 28 11:09:27 crc kubenswrapper[4923]: I1128 11:09:27.418753 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"092566f7-fc7d-4897-a1f2-4ecedcd3058e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5e3ad6f76cbc3a3e771dc55c8711f153c18c1c96798a89e0f20b1ff06041129c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nthb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e0494fbf37786a6c8b1524ab2642c29343c3cfef308a6f0988d59f375d732a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nthb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-bwdth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:27Z is after 2025-08-24T17:21:41Z"
\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nthb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-bwdth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:27Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:27 crc kubenswrapper[4923]: I1128 11:09:27.427022 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:27 crc kubenswrapper[4923]: I1128 11:09:27.427056 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:27 crc kubenswrapper[4923]: I1128 11:09:27.427067 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:27 crc kubenswrapper[4923]: I1128 11:09:27.427084 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:27 crc kubenswrapper[4923]: I1128 11:09:27.427098 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:27Z","lastTransitionTime":"2025-11-28T11:09:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:27 crc kubenswrapper[4923]: I1128 11:09:27.441325 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ee3c047cb59b98c8394618e6194fc477b983a7039581951378c69698b307ee7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3c01dc5b138b3d245898dd4a01c5e81350afe6fabfe9e0333589cd9439d4017\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://88bb4ac52c4706ca3d80080efb31eff071b89651d1a474b4c0c11ed5559ee7a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7b206747c810fe48a3d4269cdf80dce693f2d075510aabb42ef2c6dbbea97e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7489bfb225a27d96b70124820fb1924580c08b3355ef948335f881d7646a8a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bc7c6e0b076f04ba7810c82578147a9a3af59d3393e8effb111c299583aa6de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bbada82e4374cddba43b2570877b1a338c03bcc8b3691cb2cfce9c5e59d8f271\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bbada82e4374cddba43b2570877b1a338c03bcc8b3691cb2cfce9c5e59d8f271\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T11:09:12Z\\\",\\\"message\\\":\\\"tors_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.189:50051:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {d389393c-7ba9-422c-b3f5-06e391d537d2}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1128 11:09:11.808965 6262 obj_retry.go:409] Going to retry *v1.Pod resource setup for 10 objects: [openshift-kube-apiserver/kube-apiserver-crc openshift-multus/multus-additional-cni-plugins-9gjj9 openshift-network-console/networking-console-plugin-85b44fc459-gdk6g openshift-network-operator/network-operator-58b4c7f79c-55gtf openshift-image-registry/node-ca-9qvkm openshift-network-node-identity/network-node-identity-vrzqb openshift-machine-config-operator/machine-config-daemon-bwdth openshift-network-diagnostics/network-check-source-55646444c4-trplf openshift-network-diagnostics/network-check-target-xd92c openshift-network-operator/iptables-alerter-4ln5h]\\\\nI1128 11:09:11.809487 6262 obj_retry.go:418] Waiting for all the *v1.Pod retry setup to complete in iterateRetryResources\\\\nF1128 11:09:11.809540 6262 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:10Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-68dth_openshift-ovn-kubernetes(08e03349-56fc-4b2d-93d3-cf2405a4b7ad)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c2e3f2c83ec1b586a9478fb8d23caccab36a0fe08a3f0907a7b0cb2e67af65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-68dth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:27Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:27 crc kubenswrapper[4923]: I1128 11:09:27.454182 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-8klhg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b1f111d9-e2b2-44b9-9592-bc5d4fef01f0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69bb796e49d5ca00e472f027f1443316695a4e243faff1eec26bc13d67bbc60a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vq594
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f90a5608dca4e71887975960683dda08b1b5e01f598af251663a968bb7fe56c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vq594\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:13Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-8klhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:27Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:27 crc kubenswrapper[4923]: I1128 11:09:27.467601 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c83fada-ddb5-4acd-99c4-74d9f42e6250\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eece6b2154126c64202c6cb5a8b2953275ed2dc75e76fef6aaf2c4b82a1979f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28093276aebb4751d979649c4ced86f500308d0d4dde397771c0e1e968250ec8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28ae91e6197ea506c337abdbce14a048856e6bda9b35c5de922904c26bc96a54\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb7df64556e877b9dd56be5e97103abc8aa8b28a43b4a5389d0f6e2489057cf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc06f87c8ea0744810e2b9cb7ff8bb529fc1b2133ab79d12eb8e6129accd3e18\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"-12-28 11:08:43 +0000 UTC (now=2025-11-28 11:08:59.275700323 +0000 UTC))\\\\\\\"\\\\nI1128 11:08:59.275749 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1128 11:08:59.275786 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1128 11:08:59.275797 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI1128 11:08:59.275809 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1128 11:08:59.275835 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1128 11:08:59.275852 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764328134\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764328133\\\\\\\\\\\\\\\" (2025-11-28 10:08:53 +0000 UTC to 2026-11-28 10:08:53 +0000 UTC (now=2025-11-28 11:08:59.275832266 +0000 UTC))\\\\\\\"\\\\nI1128 11:08:59.275869 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1128 11:08:59.275889 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1128 11:08:59.275902 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1128 11:08:59.275909 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1128 11:08:59.275921 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1128 11:08:59.275909 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2723273528/tls.crt::/tmp/serving-cert-2723273528/tls.key\\\\\\\"\\\\nF1128 11:08:59.278169 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:43Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c6f085f1fd5a1ed6abe0727d6a94c95fb1b97a9f00a0dc157f62f68698c25ba9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:27Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:27 crc kubenswrapper[4923]: I1128 11:09:27.479896 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c1e1dcf5efd54a3e3546460813ddc68dae027e669a19eeef6af7246b385ed21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:27Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:27 crc kubenswrapper[4923]: I1128 11:09:27.494289 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:27Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:27 crc kubenswrapper[4923]: I1128 11:09:27.505043 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-766k2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69fcf39a-3416-4733-a55a-043d5286f8ac\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14683c7234bd497157ffe1097cd1eee097e5dd0a9e53a3e39813bc75890961b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dnr6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-766k2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet 
valid: current time 2025-11-28T11:09:27Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:27 crc kubenswrapper[4923]: I1128 11:09:27.520752 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0d288688a32f135820030d0816b0e9567100a4732e99c41c8b7f05374c8251f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:27Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:27 crc kubenswrapper[4923]: I1128 11:09:27.530671 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:27 crc kubenswrapper[4923]: I1128 11:09:27.530711 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:27 crc kubenswrapper[4923]: I1128 11:09:27.530723 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:27 crc kubenswrapper[4923]: I1128 11:09:27.530743 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:27 crc kubenswrapper[4923]: I1128 11:09:27.530756 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:27Z","lastTransitionTime":"2025-11-28T11:09:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:27 crc kubenswrapper[4923]: I1128 11:09:27.536030 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-9gjj9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b5d7899933378350cf0b863d44216aa3d87b7343f144dcab3470ee44370de0a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27143610133e2bc3e2aa453a394a9f65fcdeb97a45221a239dd490029e5a3184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://27143610133e2bc3e2aa453a394a9f65fcdeb97a45221a239dd490029e5a3184\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79f89c182f50622044f3978965cb214c601f6de4cddc96eaa118f532b2864276\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://79f89c182f50622044f3978965cb214c601f6de4cddc96eaa118f532b2864276\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7996a8b1d06ca35a2ee6c89edc2eaa7e45a6084ab54ff0caaa091c763d3cd47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b7996a8b1d06ca35a2ee6c89edc2eaa7e45a6084ab54ff0caaa091c763d3cd47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62d8385e1aa47815f9084d28d70dae899c80019ce59f5725455c594a31c97f22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62d8385e1aa47815f9084d28d70dae899c80019ce59f5725455c594a31c97f22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f6b2e1bc9f8f538d0973d9b1726d2c105d61fcd559df3ab8a2ec77b2d8f44a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f6b2e1bc9f8f538d0973d9b1726d2c105d61fcd559df3ab8a2ec77b2d8f44a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a368daf98912d176b66d5aba37e5e91937fbee8c7bd7ce6658993668c8e1525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9a368daf98912d176b66d5aba37e5e91937fbee8c7bd7ce6658993668c8e1525\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-9gjj9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:27Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:27 crc kubenswrapper[4923]: I1128 11:09:27.545496 4923 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-image-registry/node-ca-9qvkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf32d1c9-4639-48a9-b972-c9ad6daec543\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee259c68571ed9e58d29ab09558dea3cdcc89ebfb898d6f27e896cb0d80665bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnwc6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:03Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9qvkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:27Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:27 crc kubenswrapper[4923]: I1128 11:09:27.555365 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-g2kmb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b483d037-b692-45d5-bb83-02e029649100\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmpxf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmpxf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:14Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-g2kmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:27Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:27 crc kubenswrapper[4923]: I1128 11:09:27.566660 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdcd87eab93f0216a48bbd6038ca2bc510b7b36f895bf66de15084be62a9a0e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa3a1d3e4297edce49cfd44925fbd1cb0d51752581df9a406042cc1da6f87121\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:27Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:27 crc kubenswrapper[4923]: I1128 11:09:27.582905 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"faf07f1a-1aa1-4e4a-b93d-739f0a9f1012\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f7b3757e1d1a5295909db644a475e35e9f9826cd7382a5a3eba86b4a76ac04d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f83e92b35264fccdd516d857e5a574a7156f7615b643691b6f8694daa38089b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8841f44f1d4af0e73960ce1c7ac5a4da352f85f6b3637315faa716d853be3277\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc960423fd7ee0a6231020982f5b932a6a2d7d0515d6f6df503d6c5d51b82096\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:27Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:27 crc kubenswrapper[4923]: I1128 11:09:27.600506 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-h5s2m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"84374038-67ce-4dc0-a2c2-6eed9650c604\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://addcc8dd720a66b5089f7fa541a454de2be862cc524d1f8e4c948059ef70e20f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run
/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8z7ts\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-h5s2m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:27Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:27 crc kubenswrapper[4923]: I1128 11:09:27.633598 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:27 crc kubenswrapper[4923]: I1128 11:09:27.633709 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:27 crc kubenswrapper[4923]: I1128 11:09:27.633727 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:27 crc kubenswrapper[4923]: I1128 11:09:27.633751 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:27 crc kubenswrapper[4923]: I1128 11:09:27.633767 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:27Z","lastTransitionTime":"2025-11-28T11:09:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:27 crc kubenswrapper[4923]: I1128 11:09:27.736666 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:27 crc kubenswrapper[4923]: I1128 11:09:27.736748 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:27 crc kubenswrapper[4923]: I1128 11:09:27.736771 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:27 crc kubenswrapper[4923]: I1128 11:09:27.736804 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:27 crc kubenswrapper[4923]: I1128 11:09:27.736827 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:27Z","lastTransitionTime":"2025-11-28T11:09:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:27 crc kubenswrapper[4923]: I1128 11:09:27.839660 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:27 crc kubenswrapper[4923]: I1128 11:09:27.839730 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:27 crc kubenswrapper[4923]: I1128 11:09:27.839747 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:27 crc kubenswrapper[4923]: I1128 11:09:27.839773 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:27 crc kubenswrapper[4923]: I1128 11:09:27.839791 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:27Z","lastTransitionTime":"2025-11-28T11:09:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:27 crc kubenswrapper[4923]: I1128 11:09:27.942677 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:27 crc kubenswrapper[4923]: I1128 11:09:27.942781 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:27 crc kubenswrapper[4923]: I1128 11:09:27.942812 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:27 crc kubenswrapper[4923]: I1128 11:09:27.942843 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:27 crc kubenswrapper[4923]: I1128 11:09:27.942865 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:27Z","lastTransitionTime":"2025-11-28T11:09:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:28 crc kubenswrapper[4923]: I1128 11:09:28.047341 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:28 crc kubenswrapper[4923]: I1128 11:09:28.047393 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:28 crc kubenswrapper[4923]: I1128 11:09:28.047412 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:28 crc kubenswrapper[4923]: I1128 11:09:28.047437 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:28 crc kubenswrapper[4923]: I1128 11:09:28.047456 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:28Z","lastTransitionTime":"2025-11-28T11:09:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:28 crc kubenswrapper[4923]: I1128 11:09:28.151395 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:28 crc kubenswrapper[4923]: I1128 11:09:28.151456 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:28 crc kubenswrapper[4923]: I1128 11:09:28.151476 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:28 crc kubenswrapper[4923]: I1128 11:09:28.151501 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:28 crc kubenswrapper[4923]: I1128 11:09:28.151519 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:28Z","lastTransitionTime":"2025-11-28T11:09:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:28 crc kubenswrapper[4923]: I1128 11:09:28.167666 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 11:09:28 crc kubenswrapper[4923]: I1128 11:09:28.167779 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 11:09:28 crc kubenswrapper[4923]: E1128 11:09:28.167851 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 11:09:28 crc kubenswrapper[4923]: I1128 11:09:28.167691 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 11:09:28 crc kubenswrapper[4923]: E1128 11:09:28.167996 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 11:09:28 crc kubenswrapper[4923]: E1128 11:09:28.168125 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 11:09:28 crc kubenswrapper[4923]: I1128 11:09:28.254537 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:28 crc kubenswrapper[4923]: I1128 11:09:28.254605 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:28 crc kubenswrapper[4923]: I1128 11:09:28.254622 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:28 crc kubenswrapper[4923]: I1128 11:09:28.254646 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:28 crc kubenswrapper[4923]: I1128 11:09:28.254664 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:28Z","lastTransitionTime":"2025-11-28T11:09:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:28 crc kubenswrapper[4923]: I1128 11:09:28.357376 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:28 crc kubenswrapper[4923]: I1128 11:09:28.357481 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:28 crc kubenswrapper[4923]: I1128 11:09:28.357500 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:28 crc kubenswrapper[4923]: I1128 11:09:28.357562 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:28 crc kubenswrapper[4923]: I1128 11:09:28.357583 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:28Z","lastTransitionTime":"2025-11-28T11:09:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:28 crc kubenswrapper[4923]: I1128 11:09:28.460301 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:28 crc kubenswrapper[4923]: I1128 11:09:28.460649 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:28 crc kubenswrapper[4923]: I1128 11:09:28.460784 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:28 crc kubenswrapper[4923]: I1128 11:09:28.460957 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:28 crc kubenswrapper[4923]: I1128 11:09:28.461124 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:28Z","lastTransitionTime":"2025-11-28T11:09:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:28 crc kubenswrapper[4923]: I1128 11:09:28.565240 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:28 crc kubenswrapper[4923]: I1128 11:09:28.565717 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:28 crc kubenswrapper[4923]: I1128 11:09:28.565928 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:28 crc kubenswrapper[4923]: I1128 11:09:28.566109 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:28 crc kubenswrapper[4923]: I1128 11:09:28.566264 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:28Z","lastTransitionTime":"2025-11-28T11:09:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:28 crc kubenswrapper[4923]: I1128 11:09:28.669862 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:28 crc kubenswrapper[4923]: I1128 11:09:28.670294 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:28 crc kubenswrapper[4923]: I1128 11:09:28.670456 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:28 crc kubenswrapper[4923]: I1128 11:09:28.670585 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:28 crc kubenswrapper[4923]: I1128 11:09:28.670701 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:28Z","lastTransitionTime":"2025-11-28T11:09:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:28 crc kubenswrapper[4923]: I1128 11:09:28.774445 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:28 crc kubenswrapper[4923]: I1128 11:09:28.774507 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:28 crc kubenswrapper[4923]: I1128 11:09:28.774525 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:28 crc kubenswrapper[4923]: I1128 11:09:28.774558 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:28 crc kubenswrapper[4923]: I1128 11:09:28.774586 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:28Z","lastTransitionTime":"2025-11-28T11:09:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:28 crc kubenswrapper[4923]: I1128 11:09:28.877297 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:28 crc kubenswrapper[4923]: I1128 11:09:28.877352 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:28 crc kubenswrapper[4923]: I1128 11:09:28.877370 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:28 crc kubenswrapper[4923]: I1128 11:09:28.877393 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:28 crc kubenswrapper[4923]: I1128 11:09:28.877410 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:28Z","lastTransitionTime":"2025-11-28T11:09:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:28 crc kubenswrapper[4923]: I1128 11:09:28.980033 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:28 crc kubenswrapper[4923]: I1128 11:09:28.980091 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:28 crc kubenswrapper[4923]: I1128 11:09:28.980109 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:28 crc kubenswrapper[4923]: I1128 11:09:28.980133 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:28 crc kubenswrapper[4923]: I1128 11:09:28.980152 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:28Z","lastTransitionTime":"2025-11-28T11:09:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:29 crc kubenswrapper[4923]: I1128 11:09:29.083013 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:29 crc kubenswrapper[4923]: I1128 11:09:29.083068 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:29 crc kubenswrapper[4923]: I1128 11:09:29.083084 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:29 crc kubenswrapper[4923]: I1128 11:09:29.083109 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:29 crc kubenswrapper[4923]: I1128 11:09:29.083126 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:29Z","lastTransitionTime":"2025-11-28T11:09:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:29 crc kubenswrapper[4923]: I1128 11:09:29.167856 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-g2kmb" Nov 28 11:09:29 crc kubenswrapper[4923]: E1128 11:09:29.168125 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-g2kmb" podUID="b483d037-b692-45d5-bb83-02e029649100" Nov 28 11:09:29 crc kubenswrapper[4923]: I1128 11:09:29.186668 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:29 crc kubenswrapper[4923]: I1128 11:09:29.186725 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:29 crc kubenswrapper[4923]: I1128 11:09:29.186746 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:29 crc kubenswrapper[4923]: I1128 11:09:29.186776 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:29 crc kubenswrapper[4923]: I1128 11:09:29.186799 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:29Z","lastTransitionTime":"2025-11-28T11:09:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:29 crc kubenswrapper[4923]: I1128 11:09:29.289126 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:29 crc kubenswrapper[4923]: I1128 11:09:29.289177 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:29 crc kubenswrapper[4923]: I1128 11:09:29.289193 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:29 crc kubenswrapper[4923]: I1128 11:09:29.289215 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:29 crc kubenswrapper[4923]: I1128 11:09:29.289231 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:29Z","lastTransitionTime":"2025-11-28T11:09:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:29 crc kubenswrapper[4923]: I1128 11:09:29.392825 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:29 crc kubenswrapper[4923]: I1128 11:09:29.392892 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:29 crc kubenswrapper[4923]: I1128 11:09:29.392917 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:29 crc kubenswrapper[4923]: I1128 11:09:29.392986 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:29 crc kubenswrapper[4923]: I1128 11:09:29.393012 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:29Z","lastTransitionTime":"2025-11-28T11:09:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:29 crc kubenswrapper[4923]: I1128 11:09:29.496561 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:29 crc kubenswrapper[4923]: I1128 11:09:29.496686 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:29 crc kubenswrapper[4923]: I1128 11:09:29.496714 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:29 crc kubenswrapper[4923]: I1128 11:09:29.496743 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:29 crc kubenswrapper[4923]: I1128 11:09:29.496764 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:29Z","lastTransitionTime":"2025-11-28T11:09:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:29 crc kubenswrapper[4923]: I1128 11:09:29.599301 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:29 crc kubenswrapper[4923]: I1128 11:09:29.599370 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:29 crc kubenswrapper[4923]: I1128 11:09:29.599416 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:29 crc kubenswrapper[4923]: I1128 11:09:29.599440 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:29 crc kubenswrapper[4923]: I1128 11:09:29.599458 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:29Z","lastTransitionTime":"2025-11-28T11:09:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:29 crc kubenswrapper[4923]: I1128 11:09:29.702496 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:29 crc kubenswrapper[4923]: I1128 11:09:29.702565 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:29 crc kubenswrapper[4923]: I1128 11:09:29.702583 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:29 crc kubenswrapper[4923]: I1128 11:09:29.702608 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:29 crc kubenswrapper[4923]: I1128 11:09:29.702626 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:29Z","lastTransitionTime":"2025-11-28T11:09:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:29 crc kubenswrapper[4923]: I1128 11:09:29.805135 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:29 crc kubenswrapper[4923]: I1128 11:09:29.805193 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:29 crc kubenswrapper[4923]: I1128 11:09:29.805212 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:29 crc kubenswrapper[4923]: I1128 11:09:29.805237 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:29 crc kubenswrapper[4923]: I1128 11:09:29.805254 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:29Z","lastTransitionTime":"2025-11-28T11:09:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:29 crc kubenswrapper[4923]: I1128 11:09:29.908160 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:29 crc kubenswrapper[4923]: I1128 11:09:29.908237 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:29 crc kubenswrapper[4923]: I1128 11:09:29.908259 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:29 crc kubenswrapper[4923]: I1128 11:09:29.908282 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:29 crc kubenswrapper[4923]: I1128 11:09:29.908328 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:29Z","lastTransitionTime":"2025-11-28T11:09:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:30 crc kubenswrapper[4923]: I1128 11:09:30.011616 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:30 crc kubenswrapper[4923]: I1128 11:09:30.011687 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:30 crc kubenswrapper[4923]: I1128 11:09:30.011704 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:30 crc kubenswrapper[4923]: I1128 11:09:30.011728 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:30 crc kubenswrapper[4923]: I1128 11:09:30.011745 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:30Z","lastTransitionTime":"2025-11-28T11:09:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:30 crc kubenswrapper[4923]: I1128 11:09:30.075475 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:30 crc kubenswrapper[4923]: I1128 11:09:30.075521 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:30 crc kubenswrapper[4923]: I1128 11:09:30.075539 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:30 crc kubenswrapper[4923]: I1128 11:09:30.075561 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:30 crc kubenswrapper[4923]: I1128 11:09:30.075577 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:30Z","lastTransitionTime":"2025-11-28T11:09:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:30 crc kubenswrapper[4923]: E1128 11:09:30.096826 4923 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:30Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:30Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f69ffe27-00d5-45aa-bb63-00075a21e0c7\\\",\\\"systemUUID\\\":\\\"bb6b4e53-d23a-4517-9d50-b05bdc3da8e4\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:30Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:30 crc kubenswrapper[4923]: I1128 11:09:30.101725 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:30 crc kubenswrapper[4923]: I1128 11:09:30.102334 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 11:09:30 crc kubenswrapper[4923]: I1128 11:09:30.102515 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:30 crc kubenswrapper[4923]: I1128 11:09:30.102698 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:30 crc kubenswrapper[4923]: I1128 11:09:30.102809 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:30Z","lastTransitionTime":"2025-11-28T11:09:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:30 crc kubenswrapper[4923]: E1128 11:09:30.121242 4923 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:30Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:30Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f69ffe27-00d5-45aa-bb63-00075a21e0c7\\\",\\\"systemUUID\\\":\\\"bb6b4e53-d23a-4517-9d50-b05bdc3da8e4\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:30Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:30 crc kubenswrapper[4923]: I1128 11:09:30.125477 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:30 crc kubenswrapper[4923]: I1128 11:09:30.125625 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 11:09:30 crc kubenswrapper[4923]: I1128 11:09:30.125694 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:30 crc kubenswrapper[4923]: I1128 11:09:30.125761 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:30 crc kubenswrapper[4923]: I1128 11:09:30.125833 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:30Z","lastTransitionTime":"2025-11-28T11:09:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:30 crc kubenswrapper[4923]: E1128 11:09:30.143550 4923 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:30Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:30Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f69ffe27-00d5-45aa-bb63-00075a21e0c7\\\",\\\"systemUUID\\\":\\\"bb6b4e53-d23a-4517-9d50-b05bdc3da8e4\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:30Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:30 crc kubenswrapper[4923]: I1128 11:09:30.147518 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:30 crc kubenswrapper[4923]: I1128 11:09:30.147650 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 11:09:30 crc kubenswrapper[4923]: I1128 11:09:30.147716 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:30 crc kubenswrapper[4923]: I1128 11:09:30.147784 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:30 crc kubenswrapper[4923]: I1128 11:09:30.147858 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:30Z","lastTransitionTime":"2025-11-28T11:09:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:30 crc kubenswrapper[4923]: E1128 11:09:30.167284 4923 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:30Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:30Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f69ffe27-00d5-45aa-bb63-00075a21e0c7\\\",\\\"systemUUID\\\":\\\"bb6b4e53-d23a-4517-9d50-b05bdc3da8e4\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:30Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:30 crc kubenswrapper[4923]: I1128 11:09:30.168448 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 11:09:30 crc kubenswrapper[4923]: I1128 11:09:30.168449 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 11:09:30 crc kubenswrapper[4923]: I1128 11:09:30.168521 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 11:09:30 crc kubenswrapper[4923]: E1128 11:09:30.168636 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 11:09:30 crc kubenswrapper[4923]: E1128 11:09:30.168811 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 11:09:30 crc kubenswrapper[4923]: E1128 11:09:30.168964 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 11:09:30 crc kubenswrapper[4923]: I1128 11:09:30.169122 4923 scope.go:117] "RemoveContainer" containerID="bbada82e4374cddba43b2570877b1a338c03bcc8b3691cb2cfce9c5e59d8f271" Nov 28 11:09:30 crc kubenswrapper[4923]: I1128 11:09:30.173747 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:30 crc kubenswrapper[4923]: I1128 11:09:30.173793 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:30 crc kubenswrapper[4923]: I1128 11:09:30.173809 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:30 crc kubenswrapper[4923]: I1128 11:09:30.173831 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:30 crc kubenswrapper[4923]: I1128 11:09:30.173847 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:30Z","lastTransitionTime":"2025-11-28T11:09:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:30 crc kubenswrapper[4923]: E1128 11:09:30.196304 4923 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:30Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:30Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f69ffe27-00d5-45aa-bb63-00075a21e0c7\\\",\\\"systemUUID\\\":\\\"bb6b4e53-d23a-4517-9d50-b05bdc3da8e4\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:30Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:30 crc kubenswrapper[4923]: E1128 11:09:30.197400 4923 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 28 11:09:30 crc kubenswrapper[4923]: I1128 11:09:30.202642 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 28 11:09:30 crc kubenswrapper[4923]: I1128 11:09:30.202718 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:30 crc kubenswrapper[4923]: I1128 11:09:30.202781 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:30 crc kubenswrapper[4923]: I1128 11:09:30.202843 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:30 crc kubenswrapper[4923]: I1128 11:09:30.202915 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:30Z","lastTransitionTime":"2025-11-28T11:09:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:30 crc kubenswrapper[4923]: I1128 11:09:30.308956 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:30 crc kubenswrapper[4923]: I1128 11:09:30.309009 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:30 crc kubenswrapper[4923]: I1128 11:09:30.309024 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:30 crc kubenswrapper[4923]: I1128 11:09:30.309045 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:30 crc kubenswrapper[4923]: I1128 11:09:30.309061 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:30Z","lastTransitionTime":"2025-11-28T11:09:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:30 crc kubenswrapper[4923]: I1128 11:09:30.412080 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:30 crc kubenswrapper[4923]: I1128 11:09:30.412126 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:30 crc kubenswrapper[4923]: I1128 11:09:30.412141 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:30 crc kubenswrapper[4923]: I1128 11:09:30.412160 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:30 crc kubenswrapper[4923]: I1128 11:09:30.412175 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:30Z","lastTransitionTime":"2025-11-28T11:09:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:30 crc kubenswrapper[4923]: I1128 11:09:30.514339 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:30 crc kubenswrapper[4923]: I1128 11:09:30.514380 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:30 crc kubenswrapper[4923]: I1128 11:09:30.514424 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:30 crc kubenswrapper[4923]: I1128 11:09:30.514443 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:30 crc kubenswrapper[4923]: I1128 11:09:30.514454 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:30Z","lastTransitionTime":"2025-11-28T11:09:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:30 crc kubenswrapper[4923]: I1128 11:09:30.616623 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:30 crc kubenswrapper[4923]: I1128 11:09:30.616682 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:30 crc kubenswrapper[4923]: I1128 11:09:30.616696 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:30 crc kubenswrapper[4923]: I1128 11:09:30.616717 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:30 crc kubenswrapper[4923]: I1128 11:09:30.616732 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:30Z","lastTransitionTime":"2025-11-28T11:09:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:30 crc kubenswrapper[4923]: I1128 11:09:30.710353 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b483d037-b692-45d5-bb83-02e029649100-metrics-certs\") pod \"network-metrics-daemon-g2kmb\" (UID: \"b483d037-b692-45d5-bb83-02e029649100\") " pod="openshift-multus/network-metrics-daemon-g2kmb" Nov 28 11:09:30 crc kubenswrapper[4923]: E1128 11:09:30.710579 4923 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 11:09:30 crc kubenswrapper[4923]: E1128 11:09:30.710674 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b483d037-b692-45d5-bb83-02e029649100-metrics-certs podName:b483d037-b692-45d5-bb83-02e029649100 nodeName:}" failed. No retries permitted until 2025-11-28 11:09:46.71064785 +0000 UTC m=+65.839332110 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/b483d037-b692-45d5-bb83-02e029649100-metrics-certs") pod "network-metrics-daemon-g2kmb" (UID: "b483d037-b692-45d5-bb83-02e029649100") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 11:09:30 crc kubenswrapper[4923]: I1128 11:09:30.718959 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:30 crc kubenswrapper[4923]: I1128 11:09:30.719005 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:30 crc kubenswrapper[4923]: I1128 11:09:30.719020 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:30 crc kubenswrapper[4923]: I1128 11:09:30.719041 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:30 crc kubenswrapper[4923]: I1128 11:09:30.719056 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:30Z","lastTransitionTime":"2025-11-28T11:09:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:30 crc kubenswrapper[4923]: I1128 11:09:30.821719 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:30 crc kubenswrapper[4923]: I1128 11:09:30.821772 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:30 crc kubenswrapper[4923]: I1128 11:09:30.821788 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:30 crc kubenswrapper[4923]: I1128 11:09:30.821812 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:30 crc kubenswrapper[4923]: I1128 11:09:30.821829 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:30Z","lastTransitionTime":"2025-11-28T11:09:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:30 crc kubenswrapper[4923]: I1128 11:09:30.911861 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 11:09:30 crc kubenswrapper[4923]: I1128 11:09:30.912002 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 11:09:30 crc kubenswrapper[4923]: I1128 11:09:30.912039 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 11:09:30 crc kubenswrapper[4923]: E1128 11:09:30.912132 4923 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 11:09:30 crc kubenswrapper[4923]: E1128 11:09:30.912190 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 11:10:02.912160468 +0000 UTC m=+82.040844688 (durationBeforeRetry 32s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 11:09:30 crc kubenswrapper[4923]: E1128 11:09:30.912228 4923 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 11:09:30 crc kubenswrapper[4923]: E1128 11:09:30.912243 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 11:10:02.912223099 +0000 UTC m=+82.040907329 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 11:09:30 crc kubenswrapper[4923]: E1128 11:09:30.912356 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 11:10:02.912341453 +0000 UTC m=+82.041025673 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 11:09:30 crc kubenswrapper[4923]: I1128 11:09:30.924537 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:30 crc kubenswrapper[4923]: I1128 11:09:30.924576 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:30 crc kubenswrapper[4923]: I1128 11:09:30.924587 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:30 crc kubenswrapper[4923]: I1128 11:09:30.924603 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:30 crc kubenswrapper[4923]: I1128 11:09:30.924615 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:30Z","lastTransitionTime":"2025-11-28T11:09:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.012732 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 11:09:31 crc kubenswrapper[4923]: E1128 11:09:31.012895 4923 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 11:09:31 crc kubenswrapper[4923]: E1128 11:09:31.012923 4923 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 11:09:31 crc kubenswrapper[4923]: E1128 11:09:31.012949 4923 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 11:09:31 crc kubenswrapper[4923]: E1128 11:09:31.013002 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-28 11:10:03.012987599 +0000 UTC m=+82.141671809 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.026635 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.026665 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.026673 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.026685 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.026696 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:31Z","lastTransitionTime":"2025-11-28T11:09:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.061419 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-68dth_08e03349-56fc-4b2d-93d3-cf2405a4b7ad/ovnkube-controller/1.log" Nov 28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.064508 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" event={"ID":"08e03349-56fc-4b2d-93d3-cf2405a4b7ad","Type":"ContainerStarted","Data":"026b93efaab822fb3d6aee74b0b301389d90f99963ace4d988dc77173ba770ee"} Nov 28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.064969 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" Nov 28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.079921 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-h5s2m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"84374038-67ce-4dc0-a2c2-6eed9650c604\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://addcc8dd720a66b5089f7fa541a454de2be862cc524d1f8e4c948059ef70e20f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":
\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8z7ts\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-h5s2m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:31Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.093916 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"faf07f1a-1aa1-4e4a-b93d-739f0a9f1012\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f7b3757e1d1a5295909db644a475e35e9f9826cd7382a5a3eba86b4a76ac04d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f83e92b35264fccdd516d857e5a574a7156f7615b643691b6f8694daa38089b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"k
ube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8841f44f1d4af0e73960ce1c7ac5a4da352f85f6b3637315faa716d853be3277\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc960423fd7ee0a6231020982f5b932a6a2d7d0515d6f6df503d6c5d51b82096\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:31Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.104664 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3af1089a-5262-4fa0-85fb-9f992ee6274d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://403d762c4ba4c4f3309ef1b447be25f7882da8a2d03b9376711063165438294f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3513d0400c621295e074b54a00fe7f284c38bebd8e7f11315db91fef9a2a4693\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://81443f6c4751860dce1d5ecf0f867a1c9641a989cbfd171e71de418f738108c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81a3db2980eccec7427b48074b3314c31b8471001076f7a7d9cfae435564097e\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://81a3db2980eccec7427b48074b3314c31b8471001076f7a7d9cfae435564097e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:31Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.113831 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 11:09:31 crc kubenswrapper[4923]: E1128 11:09:31.114014 4923 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 11:09:31 crc kubenswrapper[4923]: E1128 11:09:31.114032 4923 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 11:09:31 crc kubenswrapper[4923]: E1128 11:09:31.114047 4923 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 11:09:31 crc kubenswrapper[4923]: E1128 11:09:31.114095 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-28 11:10:03.114079327 +0000 UTC m=+82.242763557 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.114198 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:31Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.128552 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.128750 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.128759 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.128771 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.128780 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:31Z","lastTransitionTime":"2025-11-28T11:09:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.129415 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:31Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.145314 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:31Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.158947 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"092566f7-fc7d-4897-a1f2-4ecedcd3058e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5e3ad6f76cbc3a3e771dc55c8711f153c18c1c96798a89e0f20b1ff06041129c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nthb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":
\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e0494fbf37786a6c8b1524ab2642c29343c3cfef308a6f0988d59f375d732a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nthb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-bwdth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:31Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.168265 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-g2kmb" Nov 28 11:09:31 crc kubenswrapper[4923]: E1128 11:09:31.168446 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-g2kmb" podUID="b483d037-b692-45d5-bb83-02e029649100" Nov 28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.190562 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ee3c047cb59b98c8394618e6194fc477b983a7039581951378c69698b307ee7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3c01dc5b138b3d245898dd4a01c5e81350afe6fabfe9e0333589cd9439d4017\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"
recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88bb4ac52c4706ca3d80080efb31eff071b89651d1a474b4c0c11ed5559ee7a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7b206747c810fe48a3d4269cdf80dce693f2d075510aabb42ef2c6dbbea97e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7489bfb225a27d96b70124820fb1924580c08b3355ef948335f881d7646a8a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bc7c6e0b076f04ba7810c82578147a9a3af59d3393e8effb111c299583aa6de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257
453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://026b93efaab822fb3d6aee74b0b301389d90f99963ace4d988dc77173ba770ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bbada82e4374cddba43b2570877b1a338c03bcc8b3691cb2cfce9c5e59d8f271\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T11:09:12Z\\\",\\\"message\\\":\\\"tors_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.189:50051:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {d389393c-7ba9-422c-b3f5-06e391d537d2}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1128 11:09:11.808965 6262 obj_retry.go:409] Going to retry *v1.Pod resource setup for 10 objects: [openshift-kube-apiserver/kube-apiserver-crc openshift-multus/multus-additional-cni-plugins-9gjj9 openshift-network-console/networking-console-plugin-85b44fc459-gdk6g openshift-network-operator/network-operator-58b4c7f79c-55gtf openshift-image-registry/node-ca-9qvkm openshift-network-node-identity/network-node-identity-vrzqb openshift-machine-config-operator/machine-config-daemon-bwdth openshift-network-diagnostics/network-check-source-55646444c4-trplf openshift-network-diagnostics/network-check-target-xd92c openshift-network-operator/iptables-alerter-4ln5h]\\\\nI1128 11:09:11.809487 6262 obj_retry.go:418] Waiting for all the *v1.Pod retry setup to complete in iterateRetryResources\\\\nF1128 11:09:11.809540 6262 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to 
create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:10Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c2e3f2c83ec1b586a9478fb8d23caccab36a0fe08a3f0907a7b0cb2e67af65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\
"containerID\\\":\\\"cri-o://18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-68dth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:31Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.214436 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-8klhg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b1f111d9-e2b2-44b9-9592-bc5d4fef01f0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69bb796e49d5ca00e472f027f1443316695a4e243faff1eec26bc13d67bbc60a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vq594\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f90a5608dca4e71887975960683dda08b1b5e01f598af251663a968bb7fe56c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vq594\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:13Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-8klhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:31Z is after 2025-08-24T17:21:41Z" Nov 28 
11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.231118 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.231157 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.231170 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.231188 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.231203 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:31Z","lastTransitionTime":"2025-11-28T11:09:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.236297 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c83fada-ddb5-4acd-99c4-74d9f42e6250\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eece6b2154126c64202c6cb5a8b2953275ed2dc75e76fef6aaf2c4b82a1979f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28093276aebb4751d979649c4ced86f500308d0d4dde397771c0e1e968250ec8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d
7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28ae91e6197ea506c337abdbce14a048856e6bda9b35c5de922904c26bc96a54\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb7df64556e877b9dd56be5e97103abc8aa8b28a43b4a5389d0f6e2489057cf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc06f87c8ea0744810e2b9cb7ff8bb529fc1b2133ab79d12eb8e6129accd3e18\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"-12-28 11:08:43 +0000 UTC (now=2025-11-28 11:08:59.275700323 +0000 UTC))\\\\\\\"\\\\nI1128 11:08:59.275749 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1128 11:08:59.275786 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1128 11:08:59.275797 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI1128 11:08:59.275809 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1128 11:08:59.275835 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1128 11:08:59.275852 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764328134\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764328133\\\\\\\\\\\\\\\" (2025-11-28 10:08:53 +0000 UTC to 2026-11-28 10:08:53 +0000 UTC (now=2025-11-28 11:08:59.275832266 +0000 UTC))\\\\\\\"\\\\nI1128 11:08:59.275869 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1128 11:08:59.275889 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1128 11:08:59.275902 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" 
feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1128 11:08:59.275909 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1128 11:08:59.275921 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1128 11:08:59.275909 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2723273528/tls.crt::/tmp/serving-cert-2723273528/tls.key\\\\\\\"\\\\nF1128 11:08:59.278169 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:43Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c6f085f1fd5a1ed6abe0727d6a94c95fb1b97a9f00a0dc157f62f68698c25ba9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:31Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.261477 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c1e1dcf5efd54a3e3546460813ddc68dae027e669a19eeef6af7246b385ed21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:31Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.275815 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-766k2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"69fcf39a-3416-4733-a55a-043d5286f8ac\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14683c7234bd497157ffe1097cd1eee097e5dd0a9e53a3e39813bc75890961b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dnr6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-766k2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:31Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.294370 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdcd87eab93f0216a48bbd6038ca2bc510b7b36f895bf66de15084be62a9a0e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa3a1d3e4297edce49cfd44925fbd1cb0d51752581df9a406042cc1da6f87121\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:31Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.311809 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0d288688a32f135820030d0816b0e9567100a4732e99c41c8b7f05374c8251f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:31Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.331182 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-9gjj9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b5d7899933378350cf0b863d44216aa3d87b7343f144dcab3470ee44370de0a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27143610133e2bc3e2aa453a394a9f65fcdeb97a45221a239dd490029e5a3184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://27143610133e2bc3e2aa453a394a9f65fcdeb97a45221a239dd490029e5a3184\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79f89c182f50622044f3978965cb214c601f6de4cddc96eaa118f532b2864276\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://79f89c182f50622044f3978965cb214c601f6de4cddc96eaa118f532b2864276\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7996a8b1d06ca35a2ee6c89edc2eaa7e45a6084ab54ff0caaa091c763d3cd47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b7996a8b1d06ca35a2ee6c89edc2eaa7e45a6084ab54ff0caaa091c763d3cd47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62d8385e1aa47815f9084d28d70dae899c80019ce59f5725455c594a31c97f22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62d8385e1aa47815f9084d28d70dae899c80019ce59f5725455c594a31c97f22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f6b2e1bc9f8f538d0973d9b1726d2c105d61fcd559df3ab8a2ec77b2d8f44a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f6b2e1bc9f8f538d0973d9b1726d2c105d61fcd559df3ab8a2ec77b2d8f44a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a368daf98912d176b66d5aba37e5e91937fbee8c7bd7ce6658993668c8e1525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9a368daf98912d176b66d5aba37e5e91937fbee8c7bd7ce6658993668c8e1525\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-9gjj9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:31Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.333911 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.334012 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:31 crc 
kubenswrapper[4923]: I1128 11:09:31.334037 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.334070 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.334091 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:31Z","lastTransitionTime":"2025-11-28T11:09:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.344998 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9qvkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf32d1c9-4639-48a9-b972-c9ad6daec543\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee259c68571ed9e58d29ab09558dea3cdcc89ebfb898d6f27e896cb0d80665bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnwc6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:03Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9qvkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:31Z is after 2025-08-24T17:21:41Z" Nov 
28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.360017 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-g2kmb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b483d037-b692-45d5-bb83-02e029649100\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmpxf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmpxf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:14Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-g2kmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:31Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.372272 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-766k2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"69fcf39a-3416-4733-a55a-043d5286f8ac\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14683c7234bd497157ffe1097cd1eee097e5dd0a9e53a3e39813bc75890961b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dnr6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-766k2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:31Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.393372 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-9gjj9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b5d7899933378350cf0b863d44216aa3d87b7343f144dcab3470ee44370de0a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27143610133e2bc3e2aa453a394a9f65fcdeb97a45221a239dd490029e5a3184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://27143610133e2bc3e2aa453a394a9f65fcdeb97a45221a239dd490029e5a3184\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79f89c182f50622044f3978965cb214c601f6de4cddc96eaa118f532b2864276\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://79f89c182f50622044f3978965cb214c601f6de4cddc96eaa118f532b2864276\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7996a8b1d06ca35a2ee6c89edc2eaa7e45a6084ab54ff0caaa091c763d3cd47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b7996a8b1d06ca35a2ee6c89edc2eaa7e45a6084ab54ff0caaa091c763d3cd47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62d8385e1aa47815f9084d28d70dae899c80019ce59f5725455c594a31c97f22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62d8385e1aa47815f9084d28d70dae899c80019ce59f5725455c594a31c97f22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f6b2e1bc9f8f538d0973d9b1726d2c105d61fcd559df3ab8a2ec77b2d8f44a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f6b2e1bc9f8f538d0973d9b1726d2c105d61fcd559df3ab8a2ec77b2d8f44a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a368daf98912d176b66d5aba37e5e91937fbee8c7bd7ce6658993668c8e1525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9a368daf98912d176b66d5aba37e5e91937fbee8c7bd7ce6658993668c8e1525\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-9gjj9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:31Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.406432 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9qvkm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf32d1c9-4639-48a9-b972-c9ad6daec543\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee259c68571ed9e58d29ab09558dea3cdcc89ebfb898d6f27e896cb0d80665bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnwc6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:03Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9qvkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:31Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.419314 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-g2kmb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b483d037-b692-45d5-bb83-02e029649100\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmpxf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmpxf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:14Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-g2kmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:31Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.441358 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.441601 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.441686 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.441800 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.441890 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:31Z","lastTransitionTime":"2025-11-28T11:09:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.442755 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdcd87eab93f0216a48bbd6038ca2bc510b7b36f895bf66de15084be62a9a0e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa3a1d3e4297edce49cfd44925fbd1cb0d51752581df9a406042cc1da6f87121\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:31Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.460278 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0d288688a32f135820030d0816b0e9567100a4732e99c41c8b7f05374c8251f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:31Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.474880 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"faf07f1a-1aa1-4e4a-b93d-739f0a9f1012\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f7b3757e1d1a5295909db644a475e35e9f9826cd7382a5a3eba86b4a76ac04d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f83e92b35264fccdd516d857e5a574a7156f7615b643691b6f8694daa38089b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8841f44f1d4af0e73960ce1c7ac5a4da352f85f6b3637315faa716d853be3277\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc960423fd7ee0a6231020982f5b932a6a2d7d0515d6f6df503d6c5d51b82096\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:31Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.493103 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3af1089a-5262-4fa0-85fb-9f992ee6274d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://403d762c4ba4c4f3309ef1b447be25f7882da8a2d03b9376711063165438294f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3513d0400c621295e074b54a00fe7f284c38bebd8e7f11315db91fef9a2a4693\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://81443f6c4751860dce1d5ecf0f867a1c9641a989cbfd171e71de418f738108c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81a3db2980eccec7427b48074b3314c31b8471001076f7a7d9cfae435564097e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://81a3db2980eccec7427b48074b3314c31b8471001076f7a7d9cfae435564097e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:31Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.510759 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-h5s2m" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"84374038-67ce-4dc0-a2c2-6eed9650c604\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://addcc8dd720a66b5089f7fa541a454de2be862cc524d1f8e4c948059ef70e20f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8z7ts\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-h5s2m\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:31Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.527739 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:31Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.542795 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"092566f7-fc7d-4897-a1f2-4ecedcd3058e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5e3ad6f76cbc3a3e771dc55c8711f153c18c1c96798a89e0f20b1ff06041129c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nthb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e0494fbf37786a6c8b1524ab2642c29343c3cfef308a6f0988d59f375d732a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nthb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-bwdth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:31Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.545681 4923 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.545750 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.545769 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.545797 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.545815 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:31Z","lastTransitionTime":"2025-11-28T11:09:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.563926 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ee3c047cb59b98c8394618e6194fc477b983a7039581951378c69698b307ee7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3c01dc5b138b3d245898dd4a01c5e81350afe6fabfe9e0333589cd9439d4017\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88bb4ac52c4706ca3d80080efb31eff071b89651d1a474b4c0c11ed5559ee7a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7b206747c810fe48a3d4269cdf80dce693f2d075510aabb42ef2c6dbbea97e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7489bfb225a27d96b70124820fb1924580c08b3355ef948335f881d7646a8a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bc7c6e0b076f04ba7810c82578147a9a3af59d3393e8effb111c299583aa6de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://026b93efaab822fb3d6aee74b0b301389d90f999
63ace4d988dc77173ba770ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bbada82e4374cddba43b2570877b1a338c03bcc8b3691cb2cfce9c5e59d8f271\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T11:09:12Z\\\",\\\"message\\\":\\\"tors_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.189:50051:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {d389393c-7ba9-422c-b3f5-06e391d537d2}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1128 11:09:11.808965 6262 obj_retry.go:409] Going to retry *v1.Pod resource setup for 10 objects: [openshift-kube-apiserver/kube-apiserver-crc openshift-multus/multus-additional-cni-plugins-9gjj9 openshift-network-console/networking-console-plugin-85b44fc459-gdk6g openshift-network-operator/network-operator-58b4c7f79c-55gtf openshift-image-registry/node-ca-9qvkm openshift-network-node-identity/network-node-identity-vrzqb openshift-machine-config-operator/machine-config-daemon-bwdth openshift-network-diagnostics/network-check-source-55646444c4-trplf openshift-network-diagnostics/network-check-target-xd92c openshift-network-operator/iptables-alerter-4ln5h]\\\\nI1128 11:09:11.809487 6262 obj_retry.go:418] Waiting for all the *v1.Pod retry setup to complete in iterateRetryResources\\\\nF1128 11:09:11.809540 6262 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to 
create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:10Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c2e3f2c83ec1b586a9478fb8d23caccab36a0fe08a3f0907a7b0cb2e67af65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\
"containerID\\\":\\\"cri-o://18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-68dth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:31Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.581125 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-8klhg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b1f111d9-e2b2-44b9-9592-bc5d4fef01f0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69bb796e49d5ca00e472f027f1443316695a4e243faff1eec26bc13d67bbc60a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vq594\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f90a5608dca4e71887975960683dda08b1b5e01f598af251663a968bb7fe56c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vq594\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:13Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-8klhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:31Z is after 2025-08-24T17:21:41Z" Nov 28 
11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.601415 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c83fada-ddb5-4acd-99c4-74d9f42e6250\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eece6b2154126c64202c6cb5a8b2953275ed2dc75e76fef6aaf2c4b82a1979f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28093276aebb4751d979649c4ced86f500308d0d4dde397771c0e1e968250ec8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28ae91e6197ea506c337abdbce14a048856e6bda9b35c5de922904c26bc96a54\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\
\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb7df64556e877b9dd56be5e97103abc8aa8b28a43b4a5389d0f6e2489057cf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc06f87c8ea0744810e2b9cb7ff8bb529fc1b2133ab79d12eb8e6129accd3e18\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"-12-28 11:08:43 +0000 UTC (now=2025-11-28 11:08:59.275700323 +0000 UTC))\\\\\\\"\\\\nI1128 11:08:59.275749 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1128 11:08:59.275786 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1128 11:08:59.275797 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI1128 11:08:59.275809 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1128 11:08:59.275835 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1128 11:08:59.275852 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764328134\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764328133\\\\\\\\\\\\\\\" (2025-11-28 10:08:53 +0000 UTC to 2026-11-28 10:08:53 +0000 UTC (now=2025-11-28 11:08:59.275832266 +0000 UTC))\\\\\\\"\\\\nI1128 11:08:59.275869 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1128 11:08:59.275889 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1128 11:08:59.275902 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1128 11:08:59.275909 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1128 11:08:59.275921 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1128 11:08:59.275909 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2723273528/tls.crt::/tmp/serving-cert-2723273528/tls.key\\\\\\\"\\\\nF1128 11:08:59.278169 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:43Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c6f085f1fd5a1ed6abe0727d6a94c95fb1b97a9f00a0dc157f62f68698c25ba9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:31Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.621801 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c1e1dcf5efd54a3e3546460813ddc68dae027e669a19eeef6af7246b385ed21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:31Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.639414 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:31Z is after 2025-08-24T17:21:41Z"
Nov 28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.647981 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.648021 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.648034 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.648050 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.648062 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:31Z","lastTransitionTime":"2025-11-28T11:09:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.658197 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:31Z is after 2025-08-24T17:21:41Z"
Nov 28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.751469 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.751516 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.751532 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.751560 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.751576 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:31Z","lastTransitionTime":"2025-11-28T11:09:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.855072 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.855488 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.855667 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.855813 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.855992 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:31Z","lastTransitionTime":"2025-11-28T11:09:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.958897 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.959008 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.959037 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.959070 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 11:09:31 crc kubenswrapper[4923]: I1128 11:09:31.959091 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:31Z","lastTransitionTime":"2025-11-28T11:09:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 11:09:32 crc kubenswrapper[4923]: I1128 11:09:32.061679 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 11:09:32 crc kubenswrapper[4923]: I1128 11:09:32.061740 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 11:09:32 crc kubenswrapper[4923]: I1128 11:09:32.061757 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 11:09:32 crc kubenswrapper[4923]: I1128 11:09:32.061779 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 11:09:32 crc kubenswrapper[4923]: I1128 11:09:32.061795 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:32Z","lastTransitionTime":"2025-11-28T11:09:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 11:09:32 crc kubenswrapper[4923]: I1128 11:09:32.069319 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-68dth_08e03349-56fc-4b2d-93d3-cf2405a4b7ad/ovnkube-controller/2.log"
Nov 28 11:09:32 crc kubenswrapper[4923]: I1128 11:09:32.070169 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-68dth_08e03349-56fc-4b2d-93d3-cf2405a4b7ad/ovnkube-controller/1.log"
Nov 28 11:09:32 crc kubenswrapper[4923]: I1128 11:09:32.078993 4923 generic.go:334] "Generic (PLEG): container finished" podID="08e03349-56fc-4b2d-93d3-cf2405a4b7ad" containerID="026b93efaab822fb3d6aee74b0b301389d90f99963ace4d988dc77173ba770ee" exitCode=1
Nov 28 11:09:32 crc kubenswrapper[4923]: I1128 11:09:32.079045 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" event={"ID":"08e03349-56fc-4b2d-93d3-cf2405a4b7ad","Type":"ContainerDied","Data":"026b93efaab822fb3d6aee74b0b301389d90f99963ace4d988dc77173ba770ee"}
Nov 28 11:09:32 crc kubenswrapper[4923]: I1128 11:09:32.079100 4923 scope.go:117] "RemoveContainer" containerID="bbada82e4374cddba43b2570877b1a338c03bcc8b3691cb2cfce9c5e59d8f271"
Nov 28 11:09:32 crc kubenswrapper[4923]: I1128 11:09:32.080317 4923 scope.go:117] "RemoveContainer" containerID="026b93efaab822fb3d6aee74b0b301389d90f99963ace4d988dc77173ba770ee"
Nov 28 11:09:32 crc kubenswrapper[4923]: E1128 11:09:32.080669 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-68dth_openshift-ovn-kubernetes(08e03349-56fc-4b2d-93d3-cf2405a4b7ad)\"" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" podUID="08e03349-56fc-4b2d-93d3-cf2405a4b7ad"
Nov 28 11:09:32 crc kubenswrapper[4923]: I1128 11:09:32.100850 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdcd87eab93f0216a48bbd6038ca2bc510b7b36f895bf66de15084be62a9a0e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa3a1d3e4297edce49cfd44925fbd1cb0d51752581df9a406042cc1da6f87121\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:32Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:32 crc kubenswrapper[4923]: I1128 11:09:32.129761 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0d288688a32f135820030d0816b0e9567100a4732e99c41c8b7f05374c8251f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:32Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:32 crc kubenswrapper[4923]: I1128 11:09:32.153050 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-9gjj9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b5d7899933378350cf0b863d44216aa3d87b7343f144dcab3470ee44370de0a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27143610133e2bc3e2aa453a394a9f65fcdeb97a45221a239dd490029e5a3184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://27143610133e2bc3e2aa453a394a9f65fcdeb97a45221a239dd490029e5a3184\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79f89c182f50622044f3978965cb214c601f6de4cddc96eaa118f532b2864276\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://79f89c182f50622044f3978965cb214c601f6de4cddc96eaa118f532b2864276\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7996a8b1d06ca35a2ee6c89edc2eaa7e45a6084ab54ff0caaa091c763d3cd47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b7996a8b1d06ca35a2ee6c89edc2eaa7e45a6084ab54ff0caaa091c763d3cd47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62d8385e1aa47815f9084d28d70dae899c80019ce59f5725455c594a31c97f22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62d8385e1aa47815f9084d28d70dae899c80019ce59f5725455c594a31c97f22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f6b2e1bc9f8f538d0973d9b1726d2c105d61fcd559df3ab8a2ec77b2d8f44a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f6b2e1bc9f8f538d0973d9b1726d2c105d61fcd559df3ab8a2ec77b2d8f44a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a368daf98912d176b66d5aba37e5e91937fbee8c7bd7ce6658993668c8e1525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9a368daf98912d176b66d5aba37e5e91937fbee8c7bd7ce6658993668c8e1525\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-9gjj9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:32Z is after 2025-08-24T17:21:41Z"
Nov 28 11:09:32 crc kubenswrapper[4923]: I1128 11:09:32.164856 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 11:09:32 crc kubenswrapper[4923]: I1128 11:09:32.164899 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 11:09:32 crc kubenswrapper[4923]: I1128 11:09:32.164917 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 11:09:32 crc kubenswrapper[4923]: I1128 11:09:32.164978 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 11:09:32 crc kubenswrapper[4923]: I1128 11:09:32.165020 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:32Z","lastTransitionTime":"2025-11-28T11:09:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 11:09:32 crc kubenswrapper[4923]: I1128 11:09:32.168789 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 11:09:32 crc kubenswrapper[4923]: I1128 11:09:32.168875 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 11:09:32 crc kubenswrapper[4923]: I1128 11:09:32.168820 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 11:09:32 crc kubenswrapper[4923]: E1128 11:09:32.169082 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 11:09:32 crc kubenswrapper[4923]: E1128 11:09:32.169176 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 11:09:32 crc kubenswrapper[4923]: E1128 11:09:32.169284 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 11:09:32 crc kubenswrapper[4923]: I1128 11:09:32.169549 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9qvkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf32d1c9-4639-48a9-b972-c9ad6daec543\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee259c68571ed9e58d29ab09558dea3cdcc89ebfb898d6f27e896cb0d80665bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnwc6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:03Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9qvkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:32Z is after 2025-08-24T17:21:41Z"
Nov 28 11:09:32 crc kubenswrapper[4923]: I1128 11:09:32.184065 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-g2kmb" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b483d037-b692-45d5-bb83-02e029649100\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmpxf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmpxf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:14Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-g2kmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:32Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:32 crc kubenswrapper[4923]: I1128 11:09:32.206328 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"faf07f1a-1aa1-4e4a-b93d-739f0a9f1012\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f7b3757e1d1a5295909db644a475e35e9f9826cd7382a5a3eba86b4a76ac04d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f83e92b35264fccdd516d857e5a574a7156f7615b643691b6f8694daa38089b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8841f44f1d4af0e73960ce1c7ac5a4da352f85f6b3637315faa716d853be3277\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc960423fd7ee0a6231020982f5b932a6a2d7d0515d6f6df503d6c5d51b82096\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:32Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:32 crc kubenswrapper[4923]: I1128 11:09:32.225758 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3af1089a-5262-4fa0-85fb-9f992ee6274d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://403d762c4ba4c4f3309ef1b447be25f7882da8a2d03b9376711063165438294f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3513d0400c621295e074b54a00fe7f284c38bebd8e7f11315db91fef9a2a4693\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://81443f6c4751860dce1d5ecf0f867a1c9641a989cbfd171e71de418f738108c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81a3db2980eccec7427b48074b3314c31b8471001076f7a7d9cfae435564097e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://81a3db2980eccec7427b48074b3314c31b8471001076f7a7d9cfae435564097e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:32Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:32 crc kubenswrapper[4923]: I1128 11:09:32.242955 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-h5s2m" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"84374038-67ce-4dc0-a2c2-6eed9650c604\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://addcc8dd720a66b5089f7fa541a454de2be862cc524d1f8e4c948059ef70e20f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8z7ts\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-h5s2m\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:32Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:32 crc kubenswrapper[4923]: I1128 11:09:32.256752 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-8klhg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b1f111d9-e2b2-44b9-9592-bc5d4fef01f0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69bb796e49d5ca00e472f027f1443316695a4e243faff1eec26bc13d67bbc60a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vq594\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f90a5608dca4e71887975960683dda08b1b5e01f598af251663a968bb7fe56c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vq594\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\
\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:13Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-8klhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:32Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:32 crc kubenswrapper[4923]: I1128 11:09:32.273165 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:32 crc kubenswrapper[4923]: I1128 11:09:32.273363 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:32 crc kubenswrapper[4923]: I1128 11:09:32.273510 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:32 crc kubenswrapper[4923]: I1128 11:09:32.273641 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:32 crc kubenswrapper[4923]: I1128 11:09:32.273771 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:32Z","lastTransitionTime":"2025-11-28T11:09:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:32 crc kubenswrapper[4923]: I1128 11:09:32.280780 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c83fada-ddb5-4acd-99c4-74d9f42e6250\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eece6b2154126c64202c6cb5a8b2953275ed2dc75e76fef6aaf2c4b82a1979f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28093276aebb4751d979649c4ced86f500308d0d4dde397771c0e1e968250ec8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28ae91e6197ea506c337abdbce14a048856e6bda9b35c5de922904c26bc96a54\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb7df64556e877b9dd56be5e97103abc8aa8b28a43b4a5389d0f6e2489057cf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc06f87c8ea0744810e2b9cb7ff8bb529fc1b2133ab79d12eb8e6129accd3e18\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"-12-28 11:08:43 +0000 UTC (now=2025-11-28 11:08:59.275700323 +0000 UTC))\\\\\\\"\\\\nI1128 11:08:59.275749 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1128 11:08:59.275786 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1128 11:08:59.275797 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI1128 11:08:59.275809 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1128 11:08:59.275835 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1128 11:08:59.275852 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764328134\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764328133\\\\\\\\\\\\\\\" (2025-11-28 10:08:53 +0000 UTC to 2026-11-28 10:08:53 +0000 UTC (now=2025-11-28 11:08:59.275832266 +0000 UTC))\\\\\\\"\\\\nI1128 11:08:59.275869 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1128 11:08:59.275889 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1128 11:08:59.275902 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1128 11:08:59.275909 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1128 11:08:59.275921 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1128 11:08:59.275909 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2723273528/tls.crt::/tmp/serving-cert-2723273528/tls.key\\\\\\\"\\\\nF1128 11:08:59.278169 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:43Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c6f085f1fd5a1ed6abe0727d6a94c95fb1b97a9f00a0dc157f62f68698c25ba9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:32Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:32 crc kubenswrapper[4923]: I1128 11:09:32.298834 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c1e1dcf5efd54a3e3546460813ddc68dae027e669a19eeef6af7246b385ed21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:32Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:32 crc kubenswrapper[4923]: I1128 11:09:32.317594 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:32Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:32 crc kubenswrapper[4923]: I1128 11:09:32.335405 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:32Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:32 crc kubenswrapper[4923]: I1128 11:09:32.354825 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:32Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:32 crc kubenswrapper[4923]: I1128 11:09:32.375873 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"092566f7-fc7d-4897-a1f2-4ecedcd3058e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5e3ad6f76cbc3a3e771dc55c8711f153c18c1c96798a89e0f20b1ff06041129c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nthb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e0494fbf37786a6c8b1524ab2642c29343c3cfef308a6f0988d59f375d732a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\
\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nthb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-bwdth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:32Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:32 crc kubenswrapper[4923]: I1128 11:09:32.377673 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:32 crc kubenswrapper[4923]: I1128 11:09:32.377914 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:32 crc kubenswrapper[4923]: I1128 11:09:32.378139 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:32 crc kubenswrapper[4923]: I1128 11:09:32.378336 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:32 crc kubenswrapper[4923]: I1128 11:09:32.378528 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:32Z","lastTransitionTime":"2025-11-28T11:09:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:32 crc kubenswrapper[4923]: I1128 11:09:32.411437 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ee3c047cb59b98c8394618e6194fc477b983a7039581951378c69698b307ee7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3c01dc5b138b3d245898dd4a01c5e81350afe6fabfe9e0333589cd9439d4017\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://88bb4ac52c4706ca3d80080efb31eff071b89651d1a474b4c0c11ed5559ee7a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7b206747c810fe48a3d4269cdf80dce693f2d075510aabb42ef2c6dbbea97e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7489bfb225a27d96b70124820fb1924580c08b3355ef948335f881d7646a8a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bc7c6e0b076f04ba7810c82578147a9a3af59d3393e8effb111c299583aa6de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://026b93efaab822fb3d6aee74b0b301389d90f99963ace4d988dc77173ba770ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bbada82e4374cddba43b2570877b1a338c03bcc8b3691cb2cfce9c5e59d8f271\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T11:09:12Z\\\",\\\"message\\\":\\\"tors_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.189:50051:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {d389393c-7ba9-422c-b3f5-06e391d537d2}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1128 11:09:11.808965 6262 obj_retry.go:409] Going to retry *v1.Pod resource setup for 10 objects: [openshift-kube-apiserver/kube-apiserver-crc openshift-multus/multus-additional-cni-plugins-9gjj9 openshift-network-console/networking-console-plugin-85b44fc459-gdk6g openshift-network-operator/network-operator-58b4c7f79c-55gtf openshift-image-registry/node-ca-9qvkm openshift-network-node-identity/network-node-identity-vrzqb openshift-machine-config-operator/machine-config-daemon-bwdth openshift-network-diagnostics/network-check-source-55646444c4-trplf openshift-network-diagnostics/network-check-target-xd92c openshift-network-operator/iptables-alerter-4ln5h]\\\\nI1128 11:09:11.809487 6262 obj_retry.go:418] Waiting for all the *v1.Pod retry setup to complete in iterateRetryResources\\\\nF1128 11:09:11.809540 6262 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to 
create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:10Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://026b93efaab822fb3d6aee74b0b301389d90f99963ace4d988dc77173ba770ee\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T11:09:31Z\\\",\\\"message\\\":\\\"c771-4738-8709-09636387cb00}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {7e8bb06a-06a5-45bc-a752-26a17d322811}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:61897e97-c771-4738-8709-09636387cb00}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {c02bd945-d57b-49ff-9cd3-202ed3574b26}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.4 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {43933d5e-3c3b-4ff8-8926-04ac25de450e}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:nat Mutator:insert Value:{GoSet:[{GoUUID:43933d5e-3c3b-4ff8-8926-04ac25de450e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {e3c4661a-36a6-47f0-a6c0-a4ee741f2224}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1128 11:09:31.089339 6468 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1128 11:09:31.089404 6468 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to 
create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c2e3f2c83ec1b586a9478fb8d23caccab36a0fe08a3f0907a7b0cb2e67af65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d
1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-68dth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:32Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:32 crc kubenswrapper[4923]: I1128 11:09:32.427534 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-766k2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69fcf39a-3416-4733-a55a-043d5286f8ac\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14683c7234bd497157ffe1097cd1eee097e5dd0a9e53a3e39813bc75890961b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dnr6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"p
hase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-766k2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:32Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:32 crc kubenswrapper[4923]: I1128 11:09:32.481333 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:32 crc kubenswrapper[4923]: I1128 11:09:32.481402 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:32 crc kubenswrapper[4923]: I1128 11:09:32.481429 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:32 crc kubenswrapper[4923]: I1128 11:09:32.481460 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:32 crc kubenswrapper[4923]: I1128 11:09:32.481483 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:32Z","lastTransitionTime":"2025-11-28T11:09:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:32 crc kubenswrapper[4923]: I1128 11:09:32.585812 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:32 crc kubenswrapper[4923]: I1128 11:09:32.585856 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:32 crc kubenswrapper[4923]: I1128 11:09:32.585875 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:32 crc kubenswrapper[4923]: I1128 11:09:32.585900 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:32 crc kubenswrapper[4923]: I1128 11:09:32.585917 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:32Z","lastTransitionTime":"2025-11-28T11:09:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:32 crc kubenswrapper[4923]: I1128 11:09:32.688451 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:32 crc kubenswrapper[4923]: I1128 11:09:32.688523 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:32 crc kubenswrapper[4923]: I1128 11:09:32.688541 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:32 crc kubenswrapper[4923]: I1128 11:09:32.688567 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:32 crc kubenswrapper[4923]: I1128 11:09:32.688588 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:32Z","lastTransitionTime":"2025-11-28T11:09:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:32 crc kubenswrapper[4923]: I1128 11:09:32.791119 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:32 crc kubenswrapper[4923]: I1128 11:09:32.791193 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:32 crc kubenswrapper[4923]: I1128 11:09:32.791209 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:32 crc kubenswrapper[4923]: I1128 11:09:32.791238 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:32 crc kubenswrapper[4923]: I1128 11:09:32.791261 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:32Z","lastTransitionTime":"2025-11-28T11:09:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:32 crc kubenswrapper[4923]: I1128 11:09:32.894193 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:32 crc kubenswrapper[4923]: I1128 11:09:32.894263 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:32 crc kubenswrapper[4923]: I1128 11:09:32.894280 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:32 crc kubenswrapper[4923]: I1128 11:09:32.894304 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:32 crc kubenswrapper[4923]: I1128 11:09:32.894322 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:32Z","lastTransitionTime":"2025-11-28T11:09:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:32 crc kubenswrapper[4923]: I1128 11:09:32.997427 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:32 crc kubenswrapper[4923]: I1128 11:09:32.997758 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:32 crc kubenswrapper[4923]: I1128 11:09:32.997901 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:32 crc kubenswrapper[4923]: I1128 11:09:32.998083 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:32 crc kubenswrapper[4923]: I1128 11:09:32.998206 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:32Z","lastTransitionTime":"2025-11-28T11:09:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:33 crc kubenswrapper[4923]: I1128 11:09:33.085473 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-68dth_08e03349-56fc-4b2d-93d3-cf2405a4b7ad/ovnkube-controller/2.log" Nov 28 11:09:33 crc kubenswrapper[4923]: I1128 11:09:33.090696 4923 scope.go:117] "RemoveContainer" containerID="026b93efaab822fb3d6aee74b0b301389d90f99963ace4d988dc77173ba770ee" Nov 28 11:09:33 crc kubenswrapper[4923]: E1128 11:09:33.091060 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-68dth_openshift-ovn-kubernetes(08e03349-56fc-4b2d-93d3-cf2405a4b7ad)\"" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" podUID="08e03349-56fc-4b2d-93d3-cf2405a4b7ad" Nov 28 11:09:33 crc kubenswrapper[4923]: I1128 11:09:33.119186 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:33 crc kubenswrapper[4923]: I1128 11:09:33.119239 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:33 crc kubenswrapper[4923]: I1128 11:09:33.119260 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:33 crc kubenswrapper[4923]: I1128 11:09:33.119283 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:33 crc kubenswrapper[4923]: I1128 11:09:33.119300 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:33Z","lastTransitionTime":"2025-11-28T11:09:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:33 crc kubenswrapper[4923]: I1128 11:09:33.120634 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-9gjj9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b5d7899933378350cf0b863d44216aa3d87b7343f144dcab3470ee44370de0a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27143610133e2bc3e2aa453a394a9f65fcdeb97a45221a239dd490029e5a3184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://27143610133e2bc3e2aa453a394a9f65fcdeb97a45221a239dd490029e5a3184\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79f89c182f50622044f3978965cb214c601f6de4cddc96eaa118f532b2864276\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://79f89c182f50622044f3978965cb214c601f6de4cddc96eaa118f532b2864276\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7996a8b1d06ca35a2ee6c89edc2eaa7e45a6084ab54ff0caaa091c763d3cd47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b7996a8b1d06ca35a2ee6c89edc2eaa7e45a6084ab54ff0caaa091c763d3cd47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62d8385e1aa47815f9084d28d70dae899c80019ce59f5725455c594a31c97f22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62d8385e1aa47815f9084d28d70dae899c80019ce59f5725455c594a31c97f22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f6b2e1bc9f8f538d0973d9b1726d2c105d61fcd559df3ab8a2ec77b2d8f44a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f6b2e1bc9f8f538d0973d9b1726d2c105d61fcd559df3ab8a2ec77b2d8f44a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a368daf98912d176b66d5aba37e5e91937fbee8c7bd7ce6658993668c8e1525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9a368daf98912d176b66d5aba37e5e91937fbee8c7bd7ce6658993668c8e1525\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-9gjj9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:33Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:33 crc kubenswrapper[4923]: I1128 11:09:33.136001 4923 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-image-registry/node-ca-9qvkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf32d1c9-4639-48a9-b972-c9ad6daec543\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee259c68571ed9e58d29ab09558dea3cdcc89ebfb898d6f27e896cb0d80665bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnwc6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:03Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9qvkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:33Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:33 crc kubenswrapper[4923]: I1128 11:09:33.150721 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-g2kmb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b483d037-b692-45d5-bb83-02e029649100\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmpxf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmpxf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:14Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-g2kmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:33Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:33 crc kubenswrapper[4923]: I1128 11:09:33.168546 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdcd87eab93f0216a48bbd6038ca2bc510b7b36f895bf66de15084be62a9a0e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa3a1d3e4297edce49cfd44925fbd1cb0d51752581df9a406042cc1da6f87121\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:33Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:33 crc kubenswrapper[4923]: I1128 11:09:33.168907 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-g2kmb"
Nov 28 11:09:33 crc kubenswrapper[4923]: E1128 11:09:33.169233 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-g2kmb" podUID="b483d037-b692-45d5-bb83-02e029649100"
Nov 28 11:09:33 crc kubenswrapper[4923]: I1128 11:09:33.188454 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0d288688a32f135820030d0816b0e9567100a4732e99c41c8b7f05374c8251f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:33Z is after 2025-08-24T17:21:41Z"
Nov 28 11:09:33 crc kubenswrapper[4923]: I1128 11:09:33.206599 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"faf07f1a-1aa1-4e4a-b93d-739f0a9f1012\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f7b3757e1d1a5295909db644a475e35e9f9826cd7382a5a3eba86b4a76ac04d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f83e92b35264fccdd516d857e5a574a7156f7615b643691b6f8694daa38089b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8841f44f1d4af0e73960ce1c7ac5a4da352f85f6b3637315faa716d853be3277\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc960423fd7ee0a6231020982f5b932a6a2d7d0515d6f6df503d6c5d51b82096\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:33Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:33 crc kubenswrapper[4923]: I1128 11:09:33.221314 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3af1089a-5262-4fa0-85fb-9f992ee6274d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://403d762c4ba4c4f3309ef1b447be25f7882da8a2d03b9376711063165438294f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3513d0400c621295e074b54a00fe7f284c38bebd8e7f11315db91fef9a2a4693\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://81443f6c4751860dce1d5ecf0f867a1c9641a989cbfd171e71de418f738108c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81a3db2980eccec7427b48074b3314c31b8471001076f7a7d9cfae435564097e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://81a3db2980eccec7427b48074b3314c31b8471001076f7a7d9cfae435564097e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:33Z is after 2025-08-24T17:21:41Z"
Nov 28 11:09:33 crc kubenswrapper[4923]: I1128 11:09:33.221908 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 11:09:33 crc kubenswrapper[4923]: I1128 11:09:33.222211 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 11:09:33 crc kubenswrapper[4923]: I1128 11:09:33.222227 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 11:09:33 crc kubenswrapper[4923]: I1128 11:09:33.222241 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 11:09:33 crc kubenswrapper[4923]: I1128 
11:09:33.222252 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:33Z","lastTransitionTime":"2025-11-28T11:09:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:33 crc kubenswrapper[4923]: I1128 11:09:33.238342 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-h5s2m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"84374038-67ce-4dc0-a2c2-6eed9650c604\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://addcc8dd720a66b5089f7fa541a454de2be862cc524d1f8e4c948059ef70e20f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\
\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8z7ts\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-h5s2m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:33Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:33 crc kubenswrapper[4923]: I1128 11:09:33.256594 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:33Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:33 crc kubenswrapper[4923]: I1128 11:09:33.273377 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"092566f7-fc7d-4897-a1f2-4ecedcd3058e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5e3ad6f76cbc3a3e771dc55c8711f153c18c1c96798a89e0f20b1ff06041129c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nthb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e0494fbf37786a6c8b1524ab2642c29343c3cfef308a6f0988d59f375d732a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\
\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nthb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-bwdth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:33Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:33 crc kubenswrapper[4923]: I1128 11:09:33.301974 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ee3c047cb59b98c8394618e6194fc477b983a7039581951378c69698b307ee7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3c01dc5b138b3d245898dd4a01c5e81350afe6fabfe9e0333589cd9439d4017\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88bb4ac52c4706ca3d80080efb31eff071b89651d1a474b4c0c11ed5559ee7a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7b206747c810fe48a3d4269cdf80dce693f2d075510aabb42ef2c6dbbea97e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7489bfb225a27d96b70124820fb1924580c08b3355ef948335f881d7646a8a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bc7c6e0b076f04ba7810c82578147a9a3af59d3393e8effb111c299583aa6de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://026b93efaab822fb3d6aee74b0b301389d90f999
63ace4d988dc77173ba770ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://026b93efaab822fb3d6aee74b0b301389d90f99963ace4d988dc77173ba770ee\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T11:09:31Z\\\",\\\"message\\\":\\\"c771-4738-8709-09636387cb00}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {7e8bb06a-06a5-45bc-a752-26a17d322811}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:61897e97-c771-4738-8709-09636387cb00}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {c02bd945-d57b-49ff-9cd3-202ed3574b26}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.4 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {43933d5e-3c3b-4ff8-8926-04ac25de450e}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:nat Mutator:insert Value:{GoSet:[{GoUUID:43933d5e-3c3b-4ff8-8926-04ac25de450e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {e3c4661a-36a6-47f0-a6c0-a4ee741f2224}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1128 11:09:31.089339 6468 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1128 11:09:31.089404 6468 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:30Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-68dth_openshift-ovn-kubernetes(08e03349-56fc-4b2d-93d3-cf2405a4b7ad)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c2e3f2c83ec1b586a9478fb8d23caccab36a0fe08a3f0907a7b0cb2e67af65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-68dth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:33Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:33 crc kubenswrapper[4923]: I1128 11:09:33.317835 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-8klhg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b1f111d9-e2b2-44b9-9592-bc5d4fef01f0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69bb796e49d5ca00e472f027f1443316695a4e243faff1eec26bc13d67bbc60a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vq594
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f90a5608dca4e71887975960683dda08b1b5e01f598af251663a968bb7fe56c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vq594\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:13Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-8klhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:33Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:33 crc kubenswrapper[4923]: I1128 11:09:33.325673 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:33 crc kubenswrapper[4923]: I1128 11:09:33.325727 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:33 crc kubenswrapper[4923]: I1128 11:09:33.325746 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:33 crc kubenswrapper[4923]: I1128 11:09:33.325770 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:33 crc kubenswrapper[4923]: I1128 11:09:33.325788 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:33Z","lastTransitionTime":"2025-11-28T11:09:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:33 crc kubenswrapper[4923]: I1128 11:09:33.338416 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c83fada-ddb5-4acd-99c4-74d9f42e6250\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eece6b2154126c64202c6cb5a8b2953275ed2dc75e76fef6aaf2c4b82a1979f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28093276aebb4751d979649c4ced86f500308d0d4dde397771c0e1e968250ec8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28ae91e6197ea506c337abdbce14a048856e6bda9b35c5de922904c26bc96a54\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb7df64556e877b9dd56be5e97103abc8aa8b28a43b4a5389d0f6e2489057cf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc06f87c8ea0744810e2b9cb7ff8bb529fc1b2133ab79d12eb8e6129accd3e18\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"-12-28 11:08:43 +0000 UTC (now=2025-11-28 11:08:59.275700323 +0000 UTC))\\\\\\\"\\\\nI1128 11:08:59.275749 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1128 11:08:59.275786 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1128 11:08:59.275797 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI1128 11:08:59.275809 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1128 11:08:59.275835 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1128 11:08:59.275852 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764328134\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764328133\\\\\\\\\\\\\\\" (2025-11-28 10:08:53 +0000 UTC to 2026-11-28 10:08:53 +0000 UTC (now=2025-11-28 11:08:59.275832266 +0000 UTC))\\\\\\\"\\\\nI1128 11:08:59.275869 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1128 11:08:59.275889 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1128 11:08:59.275902 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1128 11:08:59.275909 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1128 11:08:59.275921 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1128 11:08:59.275909 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2723273528/tls.crt::/tmp/serving-cert-2723273528/tls.key\\\\\\\"\\\\nF1128 11:08:59.278169 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:43Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c6f085f1fd5a1ed6abe0727d6a94c95fb1b97a9f00a0dc157f62f68698c25ba9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:33Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:33 crc kubenswrapper[4923]: I1128 11:09:33.356752 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c1e1dcf5efd54a3e3546460813ddc68dae027e669a19eeef6af7246b385ed21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:33Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:33 crc kubenswrapper[4923]: I1128 11:09:33.375156 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:33Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:33 crc kubenswrapper[4923]: I1128 11:09:33.391731 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:33Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:33 crc kubenswrapper[4923]: I1128 11:09:33.407189 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-766k2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69fcf39a-3416-4733-a55a-043d5286f8ac\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14683c7234bd497157ffe1097cd1eee097e5dd0a9e53a3e39813bc75890961b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dnr6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-766k2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:33Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:33 crc kubenswrapper[4923]: I1128 11:09:33.428305 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:33 crc kubenswrapper[4923]: I1128 11:09:33.428365 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:33 crc kubenswrapper[4923]: I1128 11:09:33.428386 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:33 crc kubenswrapper[4923]: I1128 11:09:33.428415 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:33 crc kubenswrapper[4923]: I1128 11:09:33.428433 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:33Z","lastTransitionTime":"2025-11-28T11:09:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:33 crc kubenswrapper[4923]: I1128 11:09:33.539583 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:33 crc kubenswrapper[4923]: I1128 11:09:33.539650 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:33 crc kubenswrapper[4923]: I1128 11:09:33.539669 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:33 crc kubenswrapper[4923]: I1128 11:09:33.539694 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:33 crc kubenswrapper[4923]: I1128 11:09:33.539710 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:33Z","lastTransitionTime":"2025-11-28T11:09:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:33 crc kubenswrapper[4923]: I1128 11:09:33.644016 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:33 crc kubenswrapper[4923]: I1128 11:09:33.644143 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:33 crc kubenswrapper[4923]: I1128 11:09:33.644162 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:33 crc kubenswrapper[4923]: I1128 11:09:33.644187 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:33 crc kubenswrapper[4923]: I1128 11:09:33.644206 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:33Z","lastTransitionTime":"2025-11-28T11:09:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:33 crc kubenswrapper[4923]: I1128 11:09:33.747021 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:33 crc kubenswrapper[4923]: I1128 11:09:33.747070 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:33 crc kubenswrapper[4923]: I1128 11:09:33.747087 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:33 crc kubenswrapper[4923]: I1128 11:09:33.747114 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:33 crc kubenswrapper[4923]: I1128 11:09:33.747138 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:33Z","lastTransitionTime":"2025-11-28T11:09:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:33 crc kubenswrapper[4923]: I1128 11:09:33.850517 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:33 crc kubenswrapper[4923]: I1128 11:09:33.850565 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:33 crc kubenswrapper[4923]: I1128 11:09:33.850581 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:33 crc kubenswrapper[4923]: I1128 11:09:33.850603 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:33 crc kubenswrapper[4923]: I1128 11:09:33.850620 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:33Z","lastTransitionTime":"2025-11-28T11:09:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:33 crc kubenswrapper[4923]: I1128 11:09:33.953595 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:33 crc kubenswrapper[4923]: I1128 11:09:33.953684 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:33 crc kubenswrapper[4923]: I1128 11:09:33.953702 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:33 crc kubenswrapper[4923]: I1128 11:09:33.953726 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:33 crc kubenswrapper[4923]: I1128 11:09:33.953745 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:33Z","lastTransitionTime":"2025-11-28T11:09:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:34 crc kubenswrapper[4923]: I1128 11:09:34.056364 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:34 crc kubenswrapper[4923]: I1128 11:09:34.056434 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:34 crc kubenswrapper[4923]: I1128 11:09:34.056454 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:34 crc kubenswrapper[4923]: I1128 11:09:34.056480 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:34 crc kubenswrapper[4923]: I1128 11:09:34.056497 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:34Z","lastTransitionTime":"2025-11-28T11:09:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:34 crc kubenswrapper[4923]: I1128 11:09:34.159257 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:34 crc kubenswrapper[4923]: I1128 11:09:34.159306 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:34 crc kubenswrapper[4923]: I1128 11:09:34.159322 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:34 crc kubenswrapper[4923]: I1128 11:09:34.159346 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:34 crc kubenswrapper[4923]: I1128 11:09:34.159363 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:34Z","lastTransitionTime":"2025-11-28T11:09:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:34 crc kubenswrapper[4923]: I1128 11:09:34.167750 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 11:09:34 crc kubenswrapper[4923]: E1128 11:09:34.167895 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 11:09:34 crc kubenswrapper[4923]: I1128 11:09:34.168178 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 11:09:34 crc kubenswrapper[4923]: I1128 11:09:34.168215 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 11:09:34 crc kubenswrapper[4923]: E1128 11:09:34.168283 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 11:09:34 crc kubenswrapper[4923]: E1128 11:09:34.168382 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 11:09:34 crc kubenswrapper[4923]: I1128 11:09:34.262507 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:34 crc kubenswrapper[4923]: I1128 11:09:34.262589 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:34 crc kubenswrapper[4923]: I1128 11:09:34.262612 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:34 crc kubenswrapper[4923]: I1128 11:09:34.262642 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:34 crc kubenswrapper[4923]: I1128 11:09:34.262664 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:34Z","lastTransitionTime":"2025-11-28T11:09:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:34 crc kubenswrapper[4923]: I1128 11:09:34.365654 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:34 crc kubenswrapper[4923]: I1128 11:09:34.365732 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:34 crc kubenswrapper[4923]: I1128 11:09:34.365752 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:34 crc kubenswrapper[4923]: I1128 11:09:34.365775 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:34 crc kubenswrapper[4923]: I1128 11:09:34.365793 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:34Z","lastTransitionTime":"2025-11-28T11:09:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:34 crc kubenswrapper[4923]: I1128 11:09:34.468418 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:34 crc kubenswrapper[4923]: I1128 11:09:34.468467 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:34 crc kubenswrapper[4923]: I1128 11:09:34.468485 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:34 crc kubenswrapper[4923]: I1128 11:09:34.468505 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:34 crc kubenswrapper[4923]: I1128 11:09:34.468523 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:34Z","lastTransitionTime":"2025-11-28T11:09:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:34 crc kubenswrapper[4923]: I1128 11:09:34.571828 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:34 crc kubenswrapper[4923]: I1128 11:09:34.571880 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:34 crc kubenswrapper[4923]: I1128 11:09:34.571893 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:34 crc kubenswrapper[4923]: I1128 11:09:34.571915 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:34 crc kubenswrapper[4923]: I1128 11:09:34.571955 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:34Z","lastTransitionTime":"2025-11-28T11:09:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:34 crc kubenswrapper[4923]: I1128 11:09:34.675139 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:34 crc kubenswrapper[4923]: I1128 11:09:34.675188 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:34 crc kubenswrapper[4923]: I1128 11:09:34.675204 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:34 crc kubenswrapper[4923]: I1128 11:09:34.675224 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:34 crc kubenswrapper[4923]: I1128 11:09:34.675240 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:34Z","lastTransitionTime":"2025-11-28T11:09:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:34 crc kubenswrapper[4923]: I1128 11:09:34.778545 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:34 crc kubenswrapper[4923]: I1128 11:09:34.778618 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:34 crc kubenswrapper[4923]: I1128 11:09:34.778640 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:34 crc kubenswrapper[4923]: I1128 11:09:34.778665 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:34 crc kubenswrapper[4923]: I1128 11:09:34.778685 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:34Z","lastTransitionTime":"2025-11-28T11:09:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:34 crc kubenswrapper[4923]: I1128 11:09:34.881855 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:34 crc kubenswrapper[4923]: I1128 11:09:34.881956 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:34 crc kubenswrapper[4923]: I1128 11:09:34.881979 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:34 crc kubenswrapper[4923]: I1128 11:09:34.882005 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:34 crc kubenswrapper[4923]: I1128 11:09:34.882023 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:34Z","lastTransitionTime":"2025-11-28T11:09:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:34 crc kubenswrapper[4923]: I1128 11:09:34.985677 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:34 crc kubenswrapper[4923]: I1128 11:09:34.985741 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:34 crc kubenswrapper[4923]: I1128 11:09:34.985762 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:34 crc kubenswrapper[4923]: I1128 11:09:34.985791 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:34 crc kubenswrapper[4923]: I1128 11:09:34.985810 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:34Z","lastTransitionTime":"2025-11-28T11:09:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:35 crc kubenswrapper[4923]: I1128 11:09:35.088881 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:35 crc kubenswrapper[4923]: I1128 11:09:35.088969 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:35 crc kubenswrapper[4923]: I1128 11:09:35.088987 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:35 crc kubenswrapper[4923]: I1128 11:09:35.089015 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:35 crc kubenswrapper[4923]: I1128 11:09:35.089031 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:35Z","lastTransitionTime":"2025-11-28T11:09:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:35 crc kubenswrapper[4923]: I1128 11:09:35.168270 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-g2kmb" Nov 28 11:09:35 crc kubenswrapper[4923]: E1128 11:09:35.168455 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-g2kmb" podUID="b483d037-b692-45d5-bb83-02e029649100" Nov 28 11:09:35 crc kubenswrapper[4923]: I1128 11:09:35.192117 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:35 crc kubenswrapper[4923]: I1128 11:09:35.192279 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:35 crc kubenswrapper[4923]: I1128 11:09:35.192308 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:35 crc kubenswrapper[4923]: I1128 11:09:35.192336 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:35 crc kubenswrapper[4923]: I1128 11:09:35.192359 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:35Z","lastTransitionTime":"2025-11-28T11:09:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:35 crc kubenswrapper[4923]: I1128 11:09:35.295577 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:35 crc kubenswrapper[4923]: I1128 11:09:35.295756 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:35 crc kubenswrapper[4923]: I1128 11:09:35.295780 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:35 crc kubenswrapper[4923]: I1128 11:09:35.295833 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:35 crc kubenswrapper[4923]: I1128 11:09:35.295862 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:35Z","lastTransitionTime":"2025-11-28T11:09:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 28 11:09:35 crc kubenswrapper[4923]: I1128 11:09:35.399619 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 11:09:35 crc kubenswrapper[4923]: I1128 11:09:35.399753 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 11:09:35 crc kubenswrapper[4923]: I1128 11:09:35.399857 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 11:09:35 crc kubenswrapper[4923]: I1128 11:09:35.399914 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 11:09:35 crc kubenswrapper[4923]: I1128 11:09:35.399972 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:35Z","lastTransitionTime":"2025-11-28T11:09:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 11:09:36 crc kubenswrapper[4923]: I1128 11:09:36.168635 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 11:09:36 crc kubenswrapper[4923]: I1128 11:09:36.168691 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 11:09:36 crc kubenswrapper[4923]: I1128 11:09:36.168747 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 11:09:36 crc kubenswrapper[4923]: E1128 11:09:36.168825 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 11:09:36 crc kubenswrapper[4923]: E1128 11:09:36.169009 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 11:09:36 crc kubenswrapper[4923]: E1128 11:09:36.169278 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 11:09:37 crc kubenswrapper[4923]: I1128 11:09:37.167756 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-g2kmb"
Nov 28 11:09:37 crc kubenswrapper[4923]: E1128 11:09:37.168054 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-g2kmb" podUID="b483d037-b692-45d5-bb83-02e029649100"
Nov 28 11:09:38 crc kubenswrapper[4923]: I1128 11:09:38.168303 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 11:09:38 crc kubenswrapper[4923]: I1128 11:09:38.168386 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 11:09:38 crc kubenswrapper[4923]: I1128 11:09:38.168307 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 11:09:38 crc kubenswrapper[4923]: E1128 11:09:38.168473 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 11:09:38 crc kubenswrapper[4923]: E1128 11:09:38.168584 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 11:09:38 crc kubenswrapper[4923]: E1128 11:09:38.168742 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 11:09:39 crc kubenswrapper[4923]: I1128 11:09:39.169206 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-g2kmb"
Nov 28 11:09:39 crc kubenswrapper[4923]: E1128 11:09:39.169368 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-g2kmb" podUID="b483d037-b692-45d5-bb83-02e029649100"
Nov 28 11:09:40 crc kubenswrapper[4923]: I1128 11:09:40.168527 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 11:09:40 crc kubenswrapper[4923]: I1128 11:09:40.168678 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 11:09:40 crc kubenswrapper[4923]: I1128 11:09:40.168542 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 11:09:40 crc kubenswrapper[4923]: E1128 11:09:40.168752 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 11:09:40 crc kubenswrapper[4923]: E1128 11:09:40.168848 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 11:09:40 crc kubenswrapper[4923]: E1128 11:09:40.169096 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 11:09:40 crc kubenswrapper[4923]: I1128 11:09:40.365046 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 11:09:40 crc kubenswrapper[4923]: I1128 11:09:40.365108 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 11:09:40 crc kubenswrapper[4923]: I1128 11:09:40.365128 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 11:09:40 crc kubenswrapper[4923]: I1128 11:09:40.365150 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 11:09:40 crc kubenswrapper[4923]: I1128 11:09:40.365167 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:40Z","lastTransitionTime":"2025-11-28T11:09:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:40 crc kubenswrapper[4923]: I1128 11:09:40.434423 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:40 crc kubenswrapper[4923]: I1128 11:09:40.434468 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:40 crc kubenswrapper[4923]: I1128 11:09:40.434477 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:40 crc kubenswrapper[4923]: I1128 11:09:40.434493 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:40 crc kubenswrapper[4923]: I1128 11:09:40.434503 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:40Z","lastTransitionTime":"2025-11-28T11:09:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:40 crc kubenswrapper[4923]: E1128 11:09:40.455149 4923 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:40Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:40Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f69ffe27-00d5-45aa-bb63-00075a21e0c7\\\",\\\"systemUUID\\\":\\\"bb6b4e53-d23a-4517-9d50-b05bdc3da8e4\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:40Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:40 crc kubenswrapper[4923]: I1128 11:09:40.459640 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:40 crc kubenswrapper[4923]: I1128 11:09:40.459740 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 11:09:40 crc kubenswrapper[4923]: I1128 11:09:40.459759 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:40 crc kubenswrapper[4923]: I1128 11:09:40.459830 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:40 crc kubenswrapper[4923]: I1128 11:09:40.459850 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:40Z","lastTransitionTime":"2025-11-28T11:09:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:40 crc kubenswrapper[4923]: E1128 11:09:40.478573 4923 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:40Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:40Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f69ffe27-00d5-45aa-bb63-00075a21e0c7\\\",\\\"systemUUID\\\":\\\"bb6b4e53-d23a-4517-9d50-b05bdc3da8e4\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:40Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:40 crc kubenswrapper[4923]: I1128 11:09:40.483569 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:40 crc kubenswrapper[4923]: I1128 11:09:40.483787 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 11:09:40 crc kubenswrapper[4923]: I1128 11:09:40.483972 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:40 crc kubenswrapper[4923]: I1128 11:09:40.484176 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:40 crc kubenswrapper[4923]: I1128 11:09:40.484311 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:40Z","lastTransitionTime":"2025-11-28T11:09:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:40 crc kubenswrapper[4923]: E1128 11:09:40.505230 4923 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:40Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:40Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f69ffe27-00d5-45aa-bb63-00075a21e0c7\\\",\\\"systemUUID\\\":\\\"bb6b4e53-d23a-4517-9d50-b05bdc3da8e4\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:40Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:40 crc kubenswrapper[4923]: I1128 11:09:40.510228 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:40 crc kubenswrapper[4923]: I1128 11:09:40.510499 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 11:09:40 crc kubenswrapper[4923]: I1128 11:09:40.510680 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:40 crc kubenswrapper[4923]: I1128 11:09:40.510830 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:40 crc kubenswrapper[4923]: I1128 11:09:40.511019 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:40Z","lastTransitionTime":"2025-11-28T11:09:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:40 crc kubenswrapper[4923]: E1128 11:09:40.534447 4923 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:40Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:40Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f69ffe27-00d5-45aa-bb63-00075a21e0c7\\\",\\\"systemUUID\\\":\\\"bb6b4e53-d23a-4517-9d50-b05bdc3da8e4\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:40Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:40 crc kubenswrapper[4923]: I1128 11:09:40.538514 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:40 crc kubenswrapper[4923]: I1128 11:09:40.538717 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 11:09:40 crc kubenswrapper[4923]: I1128 11:09:40.538842 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:40 crc kubenswrapper[4923]: I1128 11:09:40.539058 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:40 crc kubenswrapper[4923]: I1128 11:09:40.539198 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:40Z","lastTransitionTime":"2025-11-28T11:09:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:40 crc kubenswrapper[4923]: E1128 11:09:40.555153 4923 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:40Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:40Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f69ffe27-00d5-45aa-bb63-00075a21e0c7\\\",\\\"systemUUID\\\":\\\"bb6b4e53-d23a-4517-9d50-b05bdc3da8e4\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:40Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:40 crc kubenswrapper[4923]: E1128 11:09:40.555387 4923 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 28 11:09:40 crc kubenswrapper[4923]: I1128 11:09:40.557772 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 28 11:09:40 crc kubenswrapper[4923]: I1128 11:09:40.557799 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:40 crc kubenswrapper[4923]: I1128 11:09:40.557809 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:40 crc kubenswrapper[4923]: I1128 11:09:40.557828 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:40 crc kubenswrapper[4923]: I1128 11:09:40.557841 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:40Z","lastTransitionTime":"2025-11-28T11:09:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:40 crc kubenswrapper[4923]: I1128 11:09:40.661216 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:40 crc kubenswrapper[4923]: I1128 11:09:40.661271 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:40 crc kubenswrapper[4923]: I1128 11:09:40.661288 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:40 crc kubenswrapper[4923]: I1128 11:09:40.661310 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:40 crc kubenswrapper[4923]: I1128 11:09:40.661327 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:40Z","lastTransitionTime":"2025-11-28T11:09:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:40 crc kubenswrapper[4923]: I1128 11:09:40.764616 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:40 crc kubenswrapper[4923]: I1128 11:09:40.764677 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:40 crc kubenswrapper[4923]: I1128 11:09:40.764696 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:40 crc kubenswrapper[4923]: I1128 11:09:40.764721 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:40 crc kubenswrapper[4923]: I1128 11:09:40.764740 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:40Z","lastTransitionTime":"2025-11-28T11:09:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:40 crc kubenswrapper[4923]: I1128 11:09:40.868108 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:40 crc kubenswrapper[4923]: I1128 11:09:40.868486 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:40 crc kubenswrapper[4923]: I1128 11:09:40.868663 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:40 crc kubenswrapper[4923]: I1128 11:09:40.868852 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:40 crc kubenswrapper[4923]: I1128 11:09:40.869053 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:40Z","lastTransitionTime":"2025-11-28T11:09:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:40 crc kubenswrapper[4923]: I1128 11:09:40.972345 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:40 crc kubenswrapper[4923]: I1128 11:09:40.972419 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:40 crc kubenswrapper[4923]: I1128 11:09:40.972439 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:40 crc kubenswrapper[4923]: I1128 11:09:40.972465 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:40 crc kubenswrapper[4923]: I1128 11:09:40.972484 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:40Z","lastTransitionTime":"2025-11-28T11:09:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:41 crc kubenswrapper[4923]: I1128 11:09:41.075674 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:41 crc kubenswrapper[4923]: I1128 11:09:41.075725 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:41 crc kubenswrapper[4923]: I1128 11:09:41.075744 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:41 crc kubenswrapper[4923]: I1128 11:09:41.075767 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:41 crc kubenswrapper[4923]: I1128 11:09:41.075817 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:41Z","lastTransitionTime":"2025-11-28T11:09:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:41 crc kubenswrapper[4923]: I1128 11:09:41.169039 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-g2kmb" Nov 28 11:09:41 crc kubenswrapper[4923]: E1128 11:09:41.169212 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-g2kmb" podUID="b483d037-b692-45d5-bb83-02e029649100" Nov 28 11:09:41 crc kubenswrapper[4923]: I1128 11:09:41.180438 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:41 crc kubenswrapper[4923]: I1128 11:09:41.180488 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:41 crc kubenswrapper[4923]: I1128 11:09:41.180505 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:41 crc kubenswrapper[4923]: I1128 11:09:41.180527 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:41 crc kubenswrapper[4923]: I1128 11:09:41.180545 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:41Z","lastTransitionTime":"2025-11-28T11:09:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:41 crc kubenswrapper[4923]: I1128 11:09:41.188035 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"092566f7-fc7d-4897-a1f2-4ecedcd3058e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5e3ad6f76cbc3a3e771dc55c8711f153c18c1c96798a89e0f20b1ff06041129c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nthb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e0494fbf37786a6c8b1524ab2642c29343c3cfef308a6f0988d59f375d732a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nthb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-bwdth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:41Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:41 crc kubenswrapper[4923]: I1128 11:09:41.221160 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ee3c047cb59b98c8394618e6194fc477b983a7039581951378c69698b307ee7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3c01dc5b138b3d245898dd4a01c5e81350afe6fabfe9e0333589cd9439d4017\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kuber
netes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88bb4ac52c4706ca3d80080efb31eff071b89651d1a474b4c0c11ed5559ee7a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7b206747c810fe48a3d4269cdf80dce693f2d075510aabb42ef2c6dbbea97e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7489bfb225a27d96b70124820fb1924580c08b3355ef948335f881d7646a8a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bc7c6e0b076f04ba7810c82578147a9a3af59d3393e8effb111c299583aa6de\\\",\\\"image
\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://026b93efaab822fb3d6aee74b0b301389d90f99963ace4d988dc77173ba770ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://026b93efaab822fb3d6aee74b0b301389d90f99963ace4d988dc77173ba770ee\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T11:09:31Z\\\",\\\"message\\\":\\\"c771-4738-8709-09636387cb00}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {7e8bb06a-06a5-45bc-a752-26a17d322811}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:61897e97-c771-4738-8709-09636387cb00}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {c02bd945-d57b-49ff-9cd3-202ed3574b26}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.4 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {43933d5e-3c3b-4ff8-8926-04ac25de450e}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:nat Mutator:insert Value:{GoSet:[{GoUUID:43933d5e-3c3b-4ff8-8926-04ac25de450e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {e3c4661a-36a6-47f0-a6c0-a4ee741f2224}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1128 11:09:31.089339 6468 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1128 11:09:31.089404 6468 ovnkube.go:137] failed to run ovnkube: [failed to start network 
controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:30Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-68dth_openshift-ovn-kubernetes(08e03349-56fc-4b2d-93d3-cf2405a4b7ad)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c2e3f2c83ec1b586a9478fb8d23caccab36a0fe08a3f0907a7b0cb2e67af65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\
\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-68dth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:41Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:41 crc kubenswrapper[4923]: I1128 11:09:41.240187 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-8klhg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b1f111d9-e2b2-44b9-9592-bc5d4fef01f0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69bb796e49d5ca00e472f027f1443316695a4e243faff1eec26bc13d67bbc60a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vq594\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f90a5608dca4e71887975960683dda08b1b5e01f598af251663a968bb7fe56c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vq594\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:13Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-8klhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:41Z is after 2025-08-24T17:21:41Z" Nov 28 
11:09:41 crc kubenswrapper[4923]: I1128 11:09:41.266341 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c83fada-ddb5-4acd-99c4-74d9f42e6250\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eece6b2154126c64202c6cb5a8b2953275ed2dc75e76fef6aaf2c4b82a1979f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28093276aebb4751d979649c4ced86f500308d0d4dde397771c0e1e968250ec8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28ae91e6197ea506c337abdbce14a048856e6bda9b35c5de922904c26bc96a54\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\
\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb7df64556e877b9dd56be5e97103abc8aa8b28a43b4a5389d0f6e2489057cf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc06f87c8ea0744810e2b9cb7ff8bb529fc1b2133ab79d12eb8e6129accd3e18\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"-12-28 11:08:43 +0000 UTC (now=2025-11-28 11:08:59.275700323 +0000 UTC))\\\\\\\"\\\\nI1128 11:08:59.275749 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1128 11:08:59.275786 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1128 11:08:59.275797 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI1128 11:08:59.275809 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1128 11:08:59.275835 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1128 11:08:59.275852 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764328134\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764328133\\\\\\\\\\\\\\\" (2025-11-28 10:08:53 +0000 UTC to 2026-11-28 10:08:53 +0000 UTC (now=2025-11-28 11:08:59.275832266 +0000 UTC))\\\\\\\"\\\\nI1128 11:08:59.275869 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1128 11:08:59.275889 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1128 11:08:59.275902 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1128 11:08:59.275909 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1128 11:08:59.275921 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1128 11:08:59.275909 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2723273528/tls.crt::/tmp/serving-cert-2723273528/tls.key\\\\\\\"\\\\nF1128 11:08:59.278169 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:43Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c6f085f1fd5a1ed6abe0727d6a94c95fb1b97a9f00a0dc157f62f68698c25ba9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:41Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:41 crc kubenswrapper[4923]: I1128 11:09:41.283509 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:41 crc kubenswrapper[4923]: I1128 11:09:41.283565 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:41 crc kubenswrapper[4923]: I1128 11:09:41.283583 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:41 crc kubenswrapper[4923]: I1128 11:09:41.283610 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:41 crc kubenswrapper[4923]: I1128 11:09:41.283628 4923 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:41Z","lastTransitionTime":"2025-11-28T11:09:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:41 crc kubenswrapper[4923]: I1128 11:09:41.290916 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c1e1dcf5efd54a3e3546460813ddc68dae027e669a19eeef6af7246b385ed21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:41Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:41 crc kubenswrapper[4923]: I1128 11:09:41.309505 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:41Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:41 crc kubenswrapper[4923]: I1128 11:09:41.328223 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:41Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:41 crc kubenswrapper[4923]: I1128 11:09:41.344395 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:41Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:41 crc kubenswrapper[4923]: I1128 11:09:41.357343 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-766k2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69fcf39a-3416-4733-a55a-043d5286f8ac\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14683c7234bd497157ffe1097cd1eee097e5dd0a9e53a3e39813bc75890961b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dnr6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-766k2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-28T11:09:41Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:41 crc kubenswrapper[4923]: I1128 11:09:41.372499 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9qvkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf32d1c9-4639-48a9-b972-c9ad6daec543\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee259c68571ed9e58d29ab09558dea3cdcc89ebfb898d6f27e896cb0d80665bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnwc6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:03Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9qvkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:41Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:41 crc kubenswrapper[4923]: I1128 11:09:41.387307 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:41 crc kubenswrapper[4923]: I1128 11:09:41.387363 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:41 crc kubenswrapper[4923]: I1128 11:09:41.387383 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:41 crc kubenswrapper[4923]: I1128 11:09:41.387407 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:41 crc kubenswrapper[4923]: I1128 11:09:41.387425 4923 setters.go:603] "Node 
became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:41Z","lastTransitionTime":"2025-11-28T11:09:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:41 crc kubenswrapper[4923]: I1128 11:09:41.388300 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-g2kmb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b483d037-b692-45d5-bb83-02e029649100\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmpxf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmpxf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:14Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-g2kmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:41Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:41 crc kubenswrapper[4923]: I1128 11:09:41.409214 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdcd87eab93f0216a48bbd6038ca2bc510b7b36f895bf66de15084be62a9a0e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa3a1d3e4297edce49cfd44925fbd1cb0d51752581df9a406042cc1da6f87121\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:41Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:41 crc kubenswrapper[4923]: I1128 11:09:41.425571 4923 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0d288688a32f135820030d0816b0e9567100a4732e99c41c8b7f05374c8251f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:41Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:41 crc kubenswrapper[4923]: I1128 11:09:41.442666 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-9gjj9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b5d7899933378350cf0b863d44216aa3d87b7343f144dcab3470ee44370de0a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27143610133e2bc3e2aa453a394a9f65fcdeb97a45221a239dd490029e5a3184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://27143610133e2bc3e2aa453a394a9f65fcdeb97a45221a239dd490029e5a3184\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79f89c182f50622044f3978965cb214c601f6de4cddc96eaa118f532b2864276\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://79f89c182f50622044f3978965cb214c601f6de4cddc96eaa118f532b2864276\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7996a8b1d06ca35a2ee6c89edc2eaa7e45a6084ab54ff0caaa091c763d3cd47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b7996a8b1d06ca35a2ee6c89edc2eaa7e45a6084ab54ff0caaa091c763d3cd47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62d8385e1aa47815f9084d28d70dae899c80019ce59f5725455c594a31c97f22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62d8385e1aa47815f9084d28d70dae899c80019ce59f5725455c594a31c97f22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f6b2e1bc9f8f538d0973d9b1726d2c105d61fcd559df3ab8a2ec77b2d8f44a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f6b2e1bc9f8f538d0973d9b1726d2c105d61fcd559df3ab8a2ec77b2d8f44a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a368daf98912d176b66d5aba37e5e91937fbee8c7bd7ce6658993668c8e1525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9a368daf98912d176b66d5aba37e5e91937fbee8c7bd7ce6658993668c8e1525\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-9gjj9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:41Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:41 crc kubenswrapper[4923]: I1128 11:09:41.461854 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"faf07f1a-1aa1-4e4a-b93d-739f0a9f1012\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f7b3757e1d1a5295909db644a475e35e9f9826cd7382a5a3eba86b4a76ac04d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f83e92b35264fccdd516d857e5a574a7156f7615b643691b6f8694daa38089b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8841f44f1d4af0e73960ce1c7ac5a4da352f85f6b3637315faa716d853be3277\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc960423fd7ee0a6231020982f5b932a6a2d7d0515d6f6df503d6c5d51b82096\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:41Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:41 crc kubenswrapper[4923]: I1128 11:09:41.477817 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3af1089a-5262-4fa0-85fb-9f992ee6274d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://403d762c4ba4c4f3309ef1b447be25f7882da8a2d03b9376711063165438294f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3513d0400c621295e074b54a00fe7f284c38bebd8e7f11315db91fef9a2a4693\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://81443f6c4751860dce1d5ecf0f867a1c9641a989cbfd171e71de418f738108c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81a3db2980eccec7427b48074b3314c31b8471001076f7a7d9cfae435564097e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://81a3db2980eccec7427b48074b3314c31b8471001076f7a7d9cfae435564097e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:41Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:41 crc kubenswrapper[4923]: I1128 11:09:41.490707 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:41 crc kubenswrapper[4923]: I1128 11:09:41.490758 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:41 crc kubenswrapper[4923]: I1128 11:09:41.490777 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:41 crc kubenswrapper[4923]: I1128 11:09:41.490799 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:41 crc kubenswrapper[4923]: I1128 
11:09:41.490816 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:41Z","lastTransitionTime":"2025-11-28T11:09:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:41 crc kubenswrapper[4923]: I1128 11:09:41.501278 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-h5s2m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"84374038-67ce-4dc0-a2c2-6eed9650c604\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://addcc8dd720a66b5089f7fa541a454de2be862cc524d1f8e4c948059ef70e20f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\
\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8z7ts\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-h5s2m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:41Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:41 crc kubenswrapper[4923]: I1128 11:09:41.593793 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:41 crc kubenswrapper[4923]: I1128 11:09:41.594130 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:41 crc kubenswrapper[4923]: I1128 11:09:41.594179 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:41 crc kubenswrapper[4923]: I1128 11:09:41.594209 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:41 crc kubenswrapper[4923]: I1128 11:09:41.594228 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:41Z","lastTransitionTime":"2025-11-28T11:09:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:41 crc kubenswrapper[4923]: I1128 11:09:41.697161 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:41 crc kubenswrapper[4923]: I1128 11:09:41.697220 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:41 crc kubenswrapper[4923]: I1128 11:09:41.697241 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:41 crc kubenswrapper[4923]: I1128 11:09:41.697265 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:41 crc kubenswrapper[4923]: I1128 11:09:41.697282 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:41Z","lastTransitionTime":"2025-11-28T11:09:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:41 crc kubenswrapper[4923]: I1128 11:09:41.801602 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:41 crc kubenswrapper[4923]: I1128 11:09:41.801971 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:41 crc kubenswrapper[4923]: I1128 11:09:41.802316 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:41 crc kubenswrapper[4923]: I1128 11:09:41.803072 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:41 crc kubenswrapper[4923]: I1128 11:09:41.803614 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:41Z","lastTransitionTime":"2025-11-28T11:09:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:41 crc kubenswrapper[4923]: I1128 11:09:41.907185 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:41 crc kubenswrapper[4923]: I1128 11:09:41.907242 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:41 crc kubenswrapper[4923]: I1128 11:09:41.907259 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:41 crc kubenswrapper[4923]: I1128 11:09:41.907284 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:41 crc kubenswrapper[4923]: I1128 11:09:41.907301 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:41Z","lastTransitionTime":"2025-11-28T11:09:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:42 crc kubenswrapper[4923]: I1128 11:09:42.009580 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:42 crc kubenswrapper[4923]: I1128 11:09:42.009628 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:42 crc kubenswrapper[4923]: I1128 11:09:42.009646 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:42 crc kubenswrapper[4923]: I1128 11:09:42.009670 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:42 crc kubenswrapper[4923]: I1128 11:09:42.009690 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:42Z","lastTransitionTime":"2025-11-28T11:09:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:42 crc kubenswrapper[4923]: I1128 11:09:42.112645 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:42 crc kubenswrapper[4923]: I1128 11:09:42.112687 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:42 crc kubenswrapper[4923]: I1128 11:09:42.112700 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:42 crc kubenswrapper[4923]: I1128 11:09:42.112718 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:42 crc kubenswrapper[4923]: I1128 11:09:42.112730 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:42Z","lastTransitionTime":"2025-11-28T11:09:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:42 crc kubenswrapper[4923]: I1128 11:09:42.167877 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 11:09:42 crc kubenswrapper[4923]: I1128 11:09:42.167991 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 11:09:42 crc kubenswrapper[4923]: I1128 11:09:42.168078 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 11:09:42 crc kubenswrapper[4923]: E1128 11:09:42.168024 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 11:09:42 crc kubenswrapper[4923]: E1128 11:09:42.168227 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 11:09:42 crc kubenswrapper[4923]: E1128 11:09:42.168374 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 11:09:42 crc kubenswrapper[4923]: I1128 11:09:42.214792 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:42 crc kubenswrapper[4923]: I1128 11:09:42.214834 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:42 crc kubenswrapper[4923]: I1128 11:09:42.214845 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:42 crc kubenswrapper[4923]: I1128 11:09:42.214862 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:42 crc kubenswrapper[4923]: I1128 11:09:42.214874 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:42Z","lastTransitionTime":"2025-11-28T11:09:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:42 crc kubenswrapper[4923]: I1128 11:09:42.317541 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:42 crc kubenswrapper[4923]: I1128 11:09:42.317604 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:42 crc kubenswrapper[4923]: I1128 11:09:42.317622 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:42 crc kubenswrapper[4923]: I1128 11:09:42.317645 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:42 crc kubenswrapper[4923]: I1128 11:09:42.317675 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:42Z","lastTransitionTime":"2025-11-28T11:09:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:42 crc kubenswrapper[4923]: I1128 11:09:42.420730 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:42 crc kubenswrapper[4923]: I1128 11:09:42.420785 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:42 crc kubenswrapper[4923]: I1128 11:09:42.420801 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:42 crc kubenswrapper[4923]: I1128 11:09:42.420826 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:42 crc kubenswrapper[4923]: I1128 11:09:42.420844 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:42Z","lastTransitionTime":"2025-11-28T11:09:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:42 crc kubenswrapper[4923]: I1128 11:09:42.523365 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:42 crc kubenswrapper[4923]: I1128 11:09:42.523403 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:42 crc kubenswrapper[4923]: I1128 11:09:42.523414 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:42 crc kubenswrapper[4923]: I1128 11:09:42.523430 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:42 crc kubenswrapper[4923]: I1128 11:09:42.523442 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:42Z","lastTransitionTime":"2025-11-28T11:09:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:42 crc kubenswrapper[4923]: I1128 11:09:42.626500 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:42 crc kubenswrapper[4923]: I1128 11:09:42.626536 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:42 crc kubenswrapper[4923]: I1128 11:09:42.626545 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:42 crc kubenswrapper[4923]: I1128 11:09:42.626557 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:42 crc kubenswrapper[4923]: I1128 11:09:42.626565 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:42Z","lastTransitionTime":"2025-11-28T11:09:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:42 crc kubenswrapper[4923]: I1128 11:09:42.730034 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:42 crc kubenswrapper[4923]: I1128 11:09:42.730068 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:42 crc kubenswrapper[4923]: I1128 11:09:42.730076 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:42 crc kubenswrapper[4923]: I1128 11:09:42.730089 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:42 crc kubenswrapper[4923]: I1128 11:09:42.730098 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:42Z","lastTransitionTime":"2025-11-28T11:09:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:42 crc kubenswrapper[4923]: I1128 11:09:42.833390 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:42 crc kubenswrapper[4923]: I1128 11:09:42.833451 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:42 crc kubenswrapper[4923]: I1128 11:09:42.833467 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:42 crc kubenswrapper[4923]: I1128 11:09:42.833491 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:42 crc kubenswrapper[4923]: I1128 11:09:42.833509 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:42Z","lastTransitionTime":"2025-11-28T11:09:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:42 crc kubenswrapper[4923]: I1128 11:09:42.936634 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:42 crc kubenswrapper[4923]: I1128 11:09:42.936705 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:42 crc kubenswrapper[4923]: I1128 11:09:42.936725 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:42 crc kubenswrapper[4923]: I1128 11:09:42.936749 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:42 crc kubenswrapper[4923]: I1128 11:09:42.936765 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:42Z","lastTransitionTime":"2025-11-28T11:09:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:43 crc kubenswrapper[4923]: I1128 11:09:43.039666 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:43 crc kubenswrapper[4923]: I1128 11:09:43.039734 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:43 crc kubenswrapper[4923]: I1128 11:09:43.039750 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:43 crc kubenswrapper[4923]: I1128 11:09:43.039774 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:43 crc kubenswrapper[4923]: I1128 11:09:43.039791 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:43Z","lastTransitionTime":"2025-11-28T11:09:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:43 crc kubenswrapper[4923]: I1128 11:09:43.142952 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:43 crc kubenswrapper[4923]: I1128 11:09:43.143012 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:43 crc kubenswrapper[4923]: I1128 11:09:43.143033 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:43 crc kubenswrapper[4923]: I1128 11:09:43.143057 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:43 crc kubenswrapper[4923]: I1128 11:09:43.143076 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:43Z","lastTransitionTime":"2025-11-28T11:09:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:43 crc kubenswrapper[4923]: I1128 11:09:43.173528 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-g2kmb" Nov 28 11:09:43 crc kubenswrapper[4923]: E1128 11:09:43.173745 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-g2kmb" podUID="b483d037-b692-45d5-bb83-02e029649100" Nov 28 11:09:43 crc kubenswrapper[4923]: I1128 11:09:43.245284 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:43 crc kubenswrapper[4923]: I1128 11:09:43.245320 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:43 crc kubenswrapper[4923]: I1128 11:09:43.245331 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:43 crc kubenswrapper[4923]: I1128 11:09:43.245345 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:43 crc kubenswrapper[4923]: I1128 11:09:43.245359 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:43Z","lastTransitionTime":"2025-11-28T11:09:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:43 crc kubenswrapper[4923]: I1128 11:09:43.348642 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:43 crc kubenswrapper[4923]: I1128 11:09:43.348732 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:43 crc kubenswrapper[4923]: I1128 11:09:43.348750 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:43 crc kubenswrapper[4923]: I1128 11:09:43.348805 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:43 crc kubenswrapper[4923]: I1128 11:09:43.348823 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:43Z","lastTransitionTime":"2025-11-28T11:09:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:43 crc kubenswrapper[4923]: I1128 11:09:43.452215 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:43 crc kubenswrapper[4923]: I1128 11:09:43.452293 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:43 crc kubenswrapper[4923]: I1128 11:09:43.452310 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:43 crc kubenswrapper[4923]: I1128 11:09:43.452362 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:43 crc kubenswrapper[4923]: I1128 11:09:43.452378 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:43Z","lastTransitionTime":"2025-11-28T11:09:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:43 crc kubenswrapper[4923]: I1128 11:09:43.556129 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:43 crc kubenswrapper[4923]: I1128 11:09:43.556175 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:43 crc kubenswrapper[4923]: I1128 11:09:43.556194 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:43 crc kubenswrapper[4923]: I1128 11:09:43.556216 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:43 crc kubenswrapper[4923]: I1128 11:09:43.556233 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:43Z","lastTransitionTime":"2025-11-28T11:09:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:43 crc kubenswrapper[4923]: I1128 11:09:43.658495 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:43 crc kubenswrapper[4923]: I1128 11:09:43.658599 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:43 crc kubenswrapper[4923]: I1128 11:09:43.658639 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:43 crc kubenswrapper[4923]: I1128 11:09:43.658673 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:43 crc kubenswrapper[4923]: I1128 11:09:43.658693 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:43Z","lastTransitionTime":"2025-11-28T11:09:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:43 crc kubenswrapper[4923]: I1128 11:09:43.761071 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:43 crc kubenswrapper[4923]: I1128 11:09:43.761128 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:43 crc kubenswrapper[4923]: I1128 11:09:43.761148 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:43 crc kubenswrapper[4923]: I1128 11:09:43.761170 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:43 crc kubenswrapper[4923]: I1128 11:09:43.761188 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:43Z","lastTransitionTime":"2025-11-28T11:09:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:43 crc kubenswrapper[4923]: I1128 11:09:43.863913 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:43 crc kubenswrapper[4923]: I1128 11:09:43.864002 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:43 crc kubenswrapper[4923]: I1128 11:09:43.864024 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:43 crc kubenswrapper[4923]: I1128 11:09:43.864056 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:43 crc kubenswrapper[4923]: I1128 11:09:43.864078 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:43Z","lastTransitionTime":"2025-11-28T11:09:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:43 crc kubenswrapper[4923]: I1128 11:09:43.966703 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:43 crc kubenswrapper[4923]: I1128 11:09:43.966760 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:43 crc kubenswrapper[4923]: I1128 11:09:43.966777 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:43 crc kubenswrapper[4923]: I1128 11:09:43.966803 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:43 crc kubenswrapper[4923]: I1128 11:09:43.966820 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:43Z","lastTransitionTime":"2025-11-28T11:09:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:44 crc kubenswrapper[4923]: I1128 11:09:44.069604 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:44 crc kubenswrapper[4923]: I1128 11:09:44.069703 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:44 crc kubenswrapper[4923]: I1128 11:09:44.069720 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:44 crc kubenswrapper[4923]: I1128 11:09:44.069744 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:44 crc kubenswrapper[4923]: I1128 11:09:44.069762 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:44Z","lastTransitionTime":"2025-11-28T11:09:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:44 crc kubenswrapper[4923]: I1128 11:09:44.167631 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 11:09:44 crc kubenswrapper[4923]: I1128 11:09:44.167655 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 11:09:44 crc kubenswrapper[4923]: E1128 11:09:44.167735 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 11:09:44 crc kubenswrapper[4923]: I1128 11:09:44.167631 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 11:09:44 crc kubenswrapper[4923]: E1128 11:09:44.168069 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 11:09:44 crc kubenswrapper[4923]: E1128 11:09:44.168026 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 11:09:44 crc kubenswrapper[4923]: I1128 11:09:44.172165 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:44 crc kubenswrapper[4923]: I1128 11:09:44.172262 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:44 crc kubenswrapper[4923]: I1128 11:09:44.172281 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:44 crc kubenswrapper[4923]: I1128 11:09:44.172307 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:44 crc kubenswrapper[4923]: I1128 11:09:44.172615 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:44Z","lastTransitionTime":"2025-11-28T11:09:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:44 crc kubenswrapper[4923]: I1128 11:09:44.275430 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:44 crc kubenswrapper[4923]: I1128 11:09:44.275493 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:44 crc kubenswrapper[4923]: I1128 11:09:44.275509 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:44 crc kubenswrapper[4923]: I1128 11:09:44.275534 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:44 crc kubenswrapper[4923]: I1128 11:09:44.275552 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:44Z","lastTransitionTime":"2025-11-28T11:09:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:44 crc kubenswrapper[4923]: I1128 11:09:44.378284 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:44 crc kubenswrapper[4923]: I1128 11:09:44.378320 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:44 crc kubenswrapper[4923]: I1128 11:09:44.378330 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:44 crc kubenswrapper[4923]: I1128 11:09:44.378347 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:44 crc kubenswrapper[4923]: I1128 11:09:44.378356 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:44Z","lastTransitionTime":"2025-11-28T11:09:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:44 crc kubenswrapper[4923]: I1128 11:09:44.481248 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:44 crc kubenswrapper[4923]: I1128 11:09:44.481309 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:44 crc kubenswrapper[4923]: I1128 11:09:44.481333 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:44 crc kubenswrapper[4923]: I1128 11:09:44.481362 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:44 crc kubenswrapper[4923]: I1128 11:09:44.481383 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:44Z","lastTransitionTime":"2025-11-28T11:09:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:44 crc kubenswrapper[4923]: I1128 11:09:44.584575 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:44 crc kubenswrapper[4923]: I1128 11:09:44.584639 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:44 crc kubenswrapper[4923]: I1128 11:09:44.584656 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:44 crc kubenswrapper[4923]: I1128 11:09:44.584682 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:44 crc kubenswrapper[4923]: I1128 11:09:44.584699 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:44Z","lastTransitionTime":"2025-11-28T11:09:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:44 crc kubenswrapper[4923]: I1128 11:09:44.687807 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:44 crc kubenswrapper[4923]: I1128 11:09:44.687835 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:44 crc kubenswrapper[4923]: I1128 11:09:44.687846 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:44 crc kubenswrapper[4923]: I1128 11:09:44.687857 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:44 crc kubenswrapper[4923]: I1128 11:09:44.687867 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:44Z","lastTransitionTime":"2025-11-28T11:09:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:44 crc kubenswrapper[4923]: I1128 11:09:44.790652 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:44 crc kubenswrapper[4923]: I1128 11:09:44.790682 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:44 crc kubenswrapper[4923]: I1128 11:09:44.790692 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:44 crc kubenswrapper[4923]: I1128 11:09:44.790704 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:44 crc kubenswrapper[4923]: I1128 11:09:44.790713 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:44Z","lastTransitionTime":"2025-11-28T11:09:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:44 crc kubenswrapper[4923]: I1128 11:09:44.892796 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:44 crc kubenswrapper[4923]: I1128 11:09:44.892849 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:44 crc kubenswrapper[4923]: I1128 11:09:44.892862 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:44 crc kubenswrapper[4923]: I1128 11:09:44.892880 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:44 crc kubenswrapper[4923]: I1128 11:09:44.892892 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:44Z","lastTransitionTime":"2025-11-28T11:09:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:44 crc kubenswrapper[4923]: I1128 11:09:44.995877 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:44 crc kubenswrapper[4923]: I1128 11:09:44.995915 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:44 crc kubenswrapper[4923]: I1128 11:09:44.995923 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:44 crc kubenswrapper[4923]: I1128 11:09:44.995991 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:44 crc kubenswrapper[4923]: I1128 11:09:44.996003 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:44Z","lastTransitionTime":"2025-11-28T11:09:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:45 crc kubenswrapper[4923]: I1128 11:09:45.098501 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:45 crc kubenswrapper[4923]: I1128 11:09:45.098580 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:45 crc kubenswrapper[4923]: I1128 11:09:45.098599 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:45 crc kubenswrapper[4923]: I1128 11:09:45.098624 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:45 crc kubenswrapper[4923]: I1128 11:09:45.098640 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:45Z","lastTransitionTime":"2025-11-28T11:09:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:45 crc kubenswrapper[4923]: I1128 11:09:45.169776 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-g2kmb" Nov 28 11:09:45 crc kubenswrapper[4923]: E1128 11:09:45.169880 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-g2kmb" podUID="b483d037-b692-45d5-bb83-02e029649100" Nov 28 11:09:45 crc kubenswrapper[4923]: I1128 11:09:45.178001 4923 scope.go:117] "RemoveContainer" containerID="026b93efaab822fb3d6aee74b0b301389d90f99963ace4d988dc77173ba770ee" Nov 28 11:09:45 crc kubenswrapper[4923]: E1128 11:09:45.179030 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-68dth_openshift-ovn-kubernetes(08e03349-56fc-4b2d-93d3-cf2405a4b7ad)\"" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" podUID="08e03349-56fc-4b2d-93d3-cf2405a4b7ad" Nov 28 11:09:45 crc kubenswrapper[4923]: I1128 11:09:45.200431 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:45 crc kubenswrapper[4923]: I1128 11:09:45.200464 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:45 crc kubenswrapper[4923]: I1128 11:09:45.200472 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:45 crc kubenswrapper[4923]: I1128 11:09:45.200486 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:45 crc kubenswrapper[4923]: I1128 11:09:45.200496 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:45Z","lastTransitionTime":"2025-11-28T11:09:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:45 crc kubenswrapper[4923]: I1128 11:09:45.302618 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:45 crc kubenswrapper[4923]: I1128 11:09:45.302677 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:45 crc kubenswrapper[4923]: I1128 11:09:45.302696 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:45 crc kubenswrapper[4923]: I1128 11:09:45.302720 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:45 crc kubenswrapper[4923]: I1128 11:09:45.302737 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:45Z","lastTransitionTime":"2025-11-28T11:09:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:45 crc kubenswrapper[4923]: I1128 11:09:45.405471 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:45 crc kubenswrapper[4923]: I1128 11:09:45.405530 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:45 crc kubenswrapper[4923]: I1128 11:09:45.405547 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:45 crc kubenswrapper[4923]: I1128 11:09:45.405571 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:45 crc kubenswrapper[4923]: I1128 11:09:45.405588 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:45Z","lastTransitionTime":"2025-11-28T11:09:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:45 crc kubenswrapper[4923]: I1128 11:09:45.507658 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:45 crc kubenswrapper[4923]: I1128 11:09:45.507724 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:45 crc kubenswrapper[4923]: I1128 11:09:45.507770 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:45 crc kubenswrapper[4923]: I1128 11:09:45.507801 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:45 crc kubenswrapper[4923]: I1128 11:09:45.507823 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:45Z","lastTransitionTime":"2025-11-28T11:09:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:45 crc kubenswrapper[4923]: I1128 11:09:45.610123 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:45 crc kubenswrapper[4923]: I1128 11:09:45.610186 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:45 crc kubenswrapper[4923]: I1128 11:09:45.610206 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:45 crc kubenswrapper[4923]: I1128 11:09:45.610233 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:45 crc kubenswrapper[4923]: I1128 11:09:45.610252 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:45Z","lastTransitionTime":"2025-11-28T11:09:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:45 crc kubenswrapper[4923]: I1128 11:09:45.712467 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:45 crc kubenswrapper[4923]: I1128 11:09:45.712499 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:45 crc kubenswrapper[4923]: I1128 11:09:45.712508 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:45 crc kubenswrapper[4923]: I1128 11:09:45.712523 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:45 crc kubenswrapper[4923]: I1128 11:09:45.712533 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:45Z","lastTransitionTime":"2025-11-28T11:09:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:45 crc kubenswrapper[4923]: I1128 11:09:45.814179 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:45 crc kubenswrapper[4923]: I1128 11:09:45.814223 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:45 crc kubenswrapper[4923]: I1128 11:09:45.814233 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:45 crc kubenswrapper[4923]: I1128 11:09:45.814245 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:45 crc kubenswrapper[4923]: I1128 11:09:45.814253 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:45Z","lastTransitionTime":"2025-11-28T11:09:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:45 crc kubenswrapper[4923]: I1128 11:09:45.916619 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:45 crc kubenswrapper[4923]: I1128 11:09:45.916709 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:45 crc kubenswrapper[4923]: I1128 11:09:45.916727 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:45 crc kubenswrapper[4923]: I1128 11:09:45.916749 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:45 crc kubenswrapper[4923]: I1128 11:09:45.916796 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:45Z","lastTransitionTime":"2025-11-28T11:09:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:46 crc kubenswrapper[4923]: I1128 11:09:46.019894 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:46 crc kubenswrapper[4923]: I1128 11:09:46.019988 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:46 crc kubenswrapper[4923]: I1128 11:09:46.020012 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:46 crc kubenswrapper[4923]: I1128 11:09:46.020041 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:46 crc kubenswrapper[4923]: I1128 11:09:46.020064 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:46Z","lastTransitionTime":"2025-11-28T11:09:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:46 crc kubenswrapper[4923]: I1128 11:09:46.122467 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:46 crc kubenswrapper[4923]: I1128 11:09:46.122511 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:46 crc kubenswrapper[4923]: I1128 11:09:46.122523 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:46 crc kubenswrapper[4923]: I1128 11:09:46.122540 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:46 crc kubenswrapper[4923]: I1128 11:09:46.122554 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:46Z","lastTransitionTime":"2025-11-28T11:09:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:46 crc kubenswrapper[4923]: I1128 11:09:46.168631 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 11:09:46 crc kubenswrapper[4923]: I1128 11:09:46.168666 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 11:09:46 crc kubenswrapper[4923]: I1128 11:09:46.168643 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 11:09:46 crc kubenswrapper[4923]: E1128 11:09:46.168819 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 11:09:46 crc kubenswrapper[4923]: E1128 11:09:46.168995 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 11:09:46 crc kubenswrapper[4923]: E1128 11:09:46.169149 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 11:09:46 crc kubenswrapper[4923]: I1128 11:09:46.225769 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:46 crc kubenswrapper[4923]: I1128 11:09:46.225836 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:46 crc kubenswrapper[4923]: I1128 11:09:46.225855 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:46 crc kubenswrapper[4923]: I1128 11:09:46.225882 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:46 crc kubenswrapper[4923]: I1128 11:09:46.225903 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:46Z","lastTransitionTime":"2025-11-28T11:09:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:46 crc kubenswrapper[4923]: I1128 11:09:46.327976 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:46 crc kubenswrapper[4923]: I1128 11:09:46.328008 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:46 crc kubenswrapper[4923]: I1128 11:09:46.328039 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:46 crc kubenswrapper[4923]: I1128 11:09:46.328056 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:46 crc kubenswrapper[4923]: I1128 11:09:46.328067 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:46Z","lastTransitionTime":"2025-11-28T11:09:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:46 crc kubenswrapper[4923]: I1128 11:09:46.430680 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:46 crc kubenswrapper[4923]: I1128 11:09:46.430720 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:46 crc kubenswrapper[4923]: I1128 11:09:46.430728 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:46 crc kubenswrapper[4923]: I1128 11:09:46.430741 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:46 crc kubenswrapper[4923]: I1128 11:09:46.430752 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:46Z","lastTransitionTime":"2025-11-28T11:09:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:46 crc kubenswrapper[4923]: I1128 11:09:46.533050 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:46 crc kubenswrapper[4923]: I1128 11:09:46.533096 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:46 crc kubenswrapper[4923]: I1128 11:09:46.533107 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:46 crc kubenswrapper[4923]: I1128 11:09:46.533126 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:46 crc kubenswrapper[4923]: I1128 11:09:46.533138 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:46Z","lastTransitionTime":"2025-11-28T11:09:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:46 crc kubenswrapper[4923]: I1128 11:09:46.635819 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:46 crc kubenswrapper[4923]: I1128 11:09:46.636119 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:46 crc kubenswrapper[4923]: I1128 11:09:46.636128 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:46 crc kubenswrapper[4923]: I1128 11:09:46.636144 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:46 crc kubenswrapper[4923]: I1128 11:09:46.636153 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:46Z","lastTransitionTime":"2025-11-28T11:09:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:46 crc kubenswrapper[4923]: I1128 11:09:46.739084 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:46 crc kubenswrapper[4923]: I1128 11:09:46.739113 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:46 crc kubenswrapper[4923]: I1128 11:09:46.739139 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:46 crc kubenswrapper[4923]: I1128 11:09:46.739151 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:46 crc kubenswrapper[4923]: I1128 11:09:46.739160 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:46Z","lastTransitionTime":"2025-11-28T11:09:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:46 crc kubenswrapper[4923]: I1128 11:09:46.794003 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b483d037-b692-45d5-bb83-02e029649100-metrics-certs\") pod \"network-metrics-daemon-g2kmb\" (UID: \"b483d037-b692-45d5-bb83-02e029649100\") " pod="openshift-multus/network-metrics-daemon-g2kmb" Nov 28 11:09:46 crc kubenswrapper[4923]: E1128 11:09:46.794141 4923 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 11:09:46 crc kubenswrapper[4923]: E1128 11:09:46.794208 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b483d037-b692-45d5-bb83-02e029649100-metrics-certs podName:b483d037-b692-45d5-bb83-02e029649100 nodeName:}" failed. No retries permitted until 2025-11-28 11:10:18.794191346 +0000 UTC m=+97.922875556 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/b483d037-b692-45d5-bb83-02e029649100-metrics-certs") pod "network-metrics-daemon-g2kmb" (UID: "b483d037-b692-45d5-bb83-02e029649100") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 11:09:46 crc kubenswrapper[4923]: I1128 11:09:46.841815 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:46 crc kubenswrapper[4923]: I1128 11:09:46.841843 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:46 crc kubenswrapper[4923]: I1128 11:09:46.841850 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:46 crc kubenswrapper[4923]: I1128 11:09:46.841862 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:46 crc kubenswrapper[4923]: I1128 11:09:46.841871 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:46Z","lastTransitionTime":"2025-11-28T11:09:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:46 crc kubenswrapper[4923]: I1128 11:09:46.944485 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:46 crc kubenswrapper[4923]: I1128 11:09:46.944509 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:46 crc kubenswrapper[4923]: I1128 11:09:46.944519 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:46 crc kubenswrapper[4923]: I1128 11:09:46.944531 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:46 crc kubenswrapper[4923]: I1128 11:09:46.944540 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:46Z","lastTransitionTime":"2025-11-28T11:09:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:47 crc kubenswrapper[4923]: I1128 11:09:47.047852 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:47 crc kubenswrapper[4923]: I1128 11:09:47.047906 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:47 crc kubenswrapper[4923]: I1128 11:09:47.047924 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:47 crc kubenswrapper[4923]: I1128 11:09:47.047988 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:47 crc kubenswrapper[4923]: I1128 11:09:47.048006 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:47Z","lastTransitionTime":"2025-11-28T11:09:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:47 crc kubenswrapper[4923]: I1128 11:09:47.136331 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-h5s2m_84374038-67ce-4dc0-a2c2-6eed9650c604/kube-multus/0.log" Nov 28 11:09:47 crc kubenswrapper[4923]: I1128 11:09:47.136388 4923 generic.go:334] "Generic (PLEG): container finished" podID="84374038-67ce-4dc0-a2c2-6eed9650c604" containerID="addcc8dd720a66b5089f7fa541a454de2be862cc524d1f8e4c948059ef70e20f" exitCode=1 Nov 28 11:09:47 crc kubenswrapper[4923]: I1128 11:09:47.136425 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-h5s2m" event={"ID":"84374038-67ce-4dc0-a2c2-6eed9650c604","Type":"ContainerDied","Data":"addcc8dd720a66b5089f7fa541a454de2be862cc524d1f8e4c948059ef70e20f"} Nov 28 11:09:47 crc kubenswrapper[4923]: I1128 11:09:47.136872 4923 scope.go:117] "RemoveContainer" containerID="addcc8dd720a66b5089f7fa541a454de2be862cc524d1f8e4c948059ef70e20f" Nov 28 11:09:47 crc kubenswrapper[4923]: I1128 11:09:47.151835 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:47 crc kubenswrapper[4923]: I1128 11:09:47.151881 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:47 crc kubenswrapper[4923]: I1128 11:09:47.151897 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:47 crc kubenswrapper[4923]: I1128 11:09:47.151918 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:47 crc kubenswrapper[4923]: I1128 11:09:47.151958 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:47Z","lastTransitionTime":"2025-11-28T11:09:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:47 crc kubenswrapper[4923]: I1128 11:09:47.156381 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-h5s2m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"84374038-67ce-4dc0-a2c2-6eed9650c604\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:47Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:47Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://addcc8dd720a66b5089f7fa541a454de2be862cc524d1f8e4c948059ef70e20f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://addcc8dd720a66b5089f7fa541a454de2be862cc524d1f8e4c948059ef70e20f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T11:09:46Z\\\",\\\"message\\\":\\\"2025-11-28T11:09:01+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_586be321-5a1b-4320-8bdd-14f453eec838\\\\n2025-11-28T11:09:01+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_586be321-5a1b-4320-8bdd-14f453eec838 to /host/opt/cni/bin/\\\\n2025-11-28T11:09:01Z [verbose] multus-daemon started\\\\n2025-11-28T11:09:01Z [verbose] Readiness Indicator file check\\\\n2025-11-28T11:09:46Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8z7ts\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-h5s2m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:47Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:47 crc kubenswrapper[4923]: I1128 11:09:47.167487 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"faf07f1a-1aa1-4e4a-b93d-739f0a9f1012\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f7b3757e1d1a5295909db644a475e35e9f9826cd7382a5a3eba86b4a76ac04d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f83e92b35264fccdd516d857e5a574a7156f7615b643691b6f8694daa38089b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8841f44f1d4af0e73960ce1c7ac5a4da352f85f6b3637315faa716d853be3277\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc960423fd7ee0a6231020982f5b932a6a2d7d0515d6f6df503d6c5d51b82096\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:47Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:47 crc kubenswrapper[4923]: I1128 11:09:47.167789 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-g2kmb" Nov 28 11:09:47 crc kubenswrapper[4923]: E1128 11:09:47.167889 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-g2kmb" podUID="b483d037-b692-45d5-bb83-02e029649100" Nov 28 11:09:47 crc kubenswrapper[4923]: I1128 11:09:47.185822 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3af1089a-5262-4fa0-85fb-9f992ee6274d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://403d762c4ba4c4f3309ef1b447be25f7882da8a2d03b9376711063165438294f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3513d0400c621295e074b54a00fe7f284c38bebd8e7f11315db91fef9a2a4693\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://81443f6c4751860dce1d5ecf0f867a1c9641a989cbfd171e71de418f738108c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\
\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81a3db2980eccec7427b48074b3314c31b8471001076f7a7d9cfae435564097e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://81a3db2980eccec7427b48074b3314c31b8471001076f7a7d9cfae435564097e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:47Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:47 crc kubenswrapper[4923]: I1128 11:09:47.213257 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:47Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:47 crc kubenswrapper[4923]: I1128 11:09:47.243183 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:47Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:47 crc kubenswrapper[4923]: I1128 11:09:47.254122 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:47 crc kubenswrapper[4923]: I1128 11:09:47.254152 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:47 crc kubenswrapper[4923]: I1128 11:09:47.254162 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:47 crc kubenswrapper[4923]: I1128 11:09:47.254176 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:47 crc kubenswrapper[4923]: I1128 11:09:47.254191 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:47Z","lastTransitionTime":"2025-11-28T11:09:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:47 crc kubenswrapper[4923]: I1128 11:09:47.267194 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:47Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:47 crc kubenswrapper[4923]: I1128 11:09:47.277687 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"092566f7-fc7d-4897-a1f2-4ecedcd3058e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5e3ad6f76cbc3a3e771dc55c8711f153c18c1c96798a89e0f20b1ff06041129c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nthb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e0494fbf37786a6c8b1524ab2642c29343c3cfef308a6f0988d59f375d732a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nthb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-bwdth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:47Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:47 crc kubenswrapper[4923]: I1128 11:09:47.294856 4923 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ee3c047cb59b98c8394618e6194fc477b983a7039581951378c69698b307ee7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3c01dc5b138b3d245898dd4a01c5e81350afe6fabfe9e0333589cd9439d4017\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88bb4ac52c4706ca3d80080efb31eff071b89651d1a474b4c0c11ed5559ee7a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7b206747c810fe48a3d4269cdf80dce693f2d075510aabb42ef2c6dbbea97e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7489bfb225a27d96b70124820fb1924580c08b3355ef948335f881d7646a8a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bc7c6e0b076f04ba7810c82578147a9a3af59d3393e8effb111c299583aa6de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://026b93efaab822fb3d6aee74b0b301389d90f99963ace4d988dc77173ba770ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://026b93efaab822fb3d6aee74b0b301389d90f99963ace4d988dc77173ba770ee\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T11:09:31Z\\\",\\\"message\\\":\\\"c771-4738-8709-09636387cb00}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {7e8bb06a-06a5-45bc-a752-26a17d322811}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:61897e97-c771-4738-8709-09636387cb00}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {c02bd945-d57b-49ff-9cd3-202ed3574b26}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.4 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {43933d5e-3c3b-4ff8-8926-04ac25de450e}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:nat Mutator:insert Value:{GoSet:[{GoUUID:43933d5e-3c3b-4ff8-8926-04ac25de450e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {e3c4661a-36a6-47f0-a6c0-a4ee741f2224}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1128 11:09:31.089339 6468 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1128 11:09:31.089404 6468 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to 
create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:30Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-68dth_openshift-ovn-kubernetes(08e03349-56fc-4b2d-93d3-cf2405a4b7ad)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c2e3f2c83ec1b586a9478fb8d23caccab36a0fe08a3f0907a7b0cb2e67af65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recurs
iveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-68dth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:47Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:47 crc kubenswrapper[4923]: I1128 11:09:47.304492 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-8klhg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b1f111d9-e2b2-44b9-9592-bc5d4fef01f0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69bb796e49d5ca00e472f027f1443316695a4e243faff1eec26bc13d67bbc60a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vq594\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f90a5608dca4e71887975960683dda08b1b5e01f598af251663a968bb7fe56c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vq594\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:13Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-8klhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:47Z is after 2025-08-24T17:21:41Z" Nov 28 
11:09:47 crc kubenswrapper[4923]: I1128 11:09:47.316618 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c83fada-ddb5-4acd-99c4-74d9f42e6250\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eece6b2154126c64202c6cb5a8b2953275ed2dc75e76fef6aaf2c4b82a1979f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28093276aebb4751d979649c4ced86f500308d0d4dde397771c0e1e968250ec8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28ae91e6197ea506c337abdbce14a048856e6bda9b35c5de922904c26bc96a54\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\
\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb7df64556e877b9dd56be5e97103abc8aa8b28a43b4a5389d0f6e2489057cf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc06f87c8ea0744810e2b9cb7ff8bb529fc1b2133ab79d12eb8e6129accd3e18\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"-12-28 11:08:43 +0000 UTC (now=2025-11-28 11:08:59.275700323 +0000 UTC))\\\\\\\"\\\\nI1128 11:08:59.275749 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1128 11:08:59.275786 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1128 11:08:59.275797 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI1128 11:08:59.275809 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1128 11:08:59.275835 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1128 11:08:59.275852 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764328134\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764328133\\\\\\\\\\\\\\\" (2025-11-28 10:08:53 +0000 UTC to 2026-11-28 10:08:53 +0000 UTC (now=2025-11-28 11:08:59.275832266 +0000 UTC))\\\\\\\"\\\\nI1128 11:08:59.275869 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1128 11:08:59.275889 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1128 11:08:59.275902 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1128 11:08:59.275909 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1128 11:08:59.275921 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1128 11:08:59.275909 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2723273528/tls.crt::/tmp/serving-cert-2723273528/tls.key\\\\\\\"\\\\nF1128 11:08:59.278169 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:43Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c6f085f1fd5a1ed6abe0727d6a94c95fb1b97a9f00a0dc157f62f68698c25ba9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:47Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:47 crc kubenswrapper[4923]: I1128 11:09:47.327317 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c1e1dcf5efd54a3e3546460813ddc68dae027e669a19eeef6af7246b385ed21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:47Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:47 crc kubenswrapper[4923]: I1128 11:09:47.334706 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-766k2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"69fcf39a-3416-4733-a55a-043d5286f8ac\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14683c7234bd497157ffe1097cd1eee097e5dd0a9e53a3e39813bc75890961b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dnr6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-766k2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:47Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:47 crc kubenswrapper[4923]: I1128 11:09:47.345180 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdcd87eab93f0216a48bbd6038ca2bc510b7b36f895bf66de15084be62a9a0e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa3a1d3e4297edce49cfd44925fbd1cb0d51752581df9a406042cc1da6f87121\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:47Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:47 crc kubenswrapper[4923]: I1128 11:09:47.354965 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0d288688a32f135820030d0816b0e9567100a4732e99c41c8b7f05374c8251f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:47Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:47 crc kubenswrapper[4923]: I1128 11:09:47.355965 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:47 crc kubenswrapper[4923]: I1128 11:09:47.356000 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:47 crc kubenswrapper[4923]: I1128 11:09:47.356010 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:47 crc kubenswrapper[4923]: I1128 11:09:47.356023 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:47 crc kubenswrapper[4923]: I1128 11:09:47.356033 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:47Z","lastTransitionTime":"2025-11-28T11:09:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:47 crc kubenswrapper[4923]: I1128 11:09:47.367889 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-9gjj9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b5d7899933378350cf0b863d44216aa3d87b7343f144dcab3470ee44370de0a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27143610133e2bc3e2aa453a394a9f65fcdeb97a45221a239dd490029e5a3184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://27143610133e2bc3e2aa453a394a9f65fcdeb97a45221a239dd490029e5a3184\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79f89c182f50622044f3978965cb214c601f6de4cddc96eaa118f532b2864276\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://79f89c182f50622044f3978965cb214c601f6de4cddc96eaa118f532b2864276\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7996a8b1d06ca35a2ee6c89edc2eaa7e45a6084ab54ff0caaa091c763d3cd47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b7996a8b1d06ca35a2ee6c89edc2eaa7e45a6084ab54ff0caaa091c763d3cd47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62d8385e1aa47815f9084d28d70dae899c80019ce59f5725455c594a31c97f22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62d8385e1aa47815f9084d28d70dae899c80019ce59f5725455c594a31c97f22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f6b2e1bc9f8f538d0973d9b1726d2c105d61fcd559df3ab8a2ec77b2d8f44a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f6b2e1bc9f8f538d0973d9b1726d2c105d61fcd559df3ab8a2ec77b2d8f44a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a368daf98912d176b66d5aba37e5e91937fbee8c7bd7ce6658993668c8e1525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9a368daf98912d176b66d5aba37e5e91937fbee8c7bd7ce6658993668c8e1525\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-9gjj9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:47Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:47 crc kubenswrapper[4923]: I1128 11:09:47.376594 4923 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-image-registry/node-ca-9qvkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf32d1c9-4639-48a9-b972-c9ad6daec543\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee259c68571ed9e58d29ab09558dea3cdcc89ebfb898d6f27e896cb0d80665bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnwc6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:03Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9qvkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:47Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:47 crc kubenswrapper[4923]: I1128 11:09:47.384860 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-g2kmb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b483d037-b692-45d5-bb83-02e029649100\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmpxf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmpxf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:14Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-g2kmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:47Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:47 crc kubenswrapper[4923]: I1128 11:09:47.458769 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:47 crc kubenswrapper[4923]: I1128 11:09:47.458837 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:47 crc kubenswrapper[4923]: I1128 11:09:47.458854 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Nov 28 11:09:47 crc kubenswrapper[4923]: I1128 11:09:47.458877 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:47 crc kubenswrapper[4923]: I1128 11:09:47.458911 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:47Z","lastTransitionTime":"2025-11-28T11:09:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:47 crc kubenswrapper[4923]: I1128 11:09:47.561325 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:47 crc kubenswrapper[4923]: I1128 11:09:47.561354 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:47 crc kubenswrapper[4923]: I1128 11:09:47.561366 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:47 crc kubenswrapper[4923]: I1128 11:09:47.561381 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:47 crc kubenswrapper[4923]: I1128 11:09:47.561390 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:47Z","lastTransitionTime":"2025-11-28T11:09:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:47 crc kubenswrapper[4923]: I1128 11:09:47.663540 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:47 crc kubenswrapper[4923]: I1128 11:09:47.663603 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:47 crc kubenswrapper[4923]: I1128 11:09:47.663624 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:47 crc kubenswrapper[4923]: I1128 11:09:47.663676 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:47 crc kubenswrapper[4923]: I1128 11:09:47.663695 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:47Z","lastTransitionTime":"2025-11-28T11:09:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:47 crc kubenswrapper[4923]: I1128 11:09:47.765681 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:47 crc kubenswrapper[4923]: I1128 11:09:47.765715 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:47 crc kubenswrapper[4923]: I1128 11:09:47.765724 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:47 crc kubenswrapper[4923]: I1128 11:09:47.765737 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:47 crc kubenswrapper[4923]: I1128 11:09:47.765747 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:47Z","lastTransitionTime":"2025-11-28T11:09:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:47 crc kubenswrapper[4923]: I1128 11:09:47.869057 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:47 crc kubenswrapper[4923]: I1128 11:09:47.869104 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:47 crc kubenswrapper[4923]: I1128 11:09:47.869113 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:47 crc kubenswrapper[4923]: I1128 11:09:47.869127 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:47 crc kubenswrapper[4923]: I1128 11:09:47.869139 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:47Z","lastTransitionTime":"2025-11-28T11:09:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:47 crc kubenswrapper[4923]: I1128 11:09:47.971816 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:47 crc kubenswrapper[4923]: I1128 11:09:47.971845 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:47 crc kubenswrapper[4923]: I1128 11:09:47.971855 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:47 crc kubenswrapper[4923]: I1128 11:09:47.971866 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:47 crc kubenswrapper[4923]: I1128 11:09:47.971876 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:47Z","lastTransitionTime":"2025-11-28T11:09:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:48 crc kubenswrapper[4923]: I1128 11:09:48.074095 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:48 crc kubenswrapper[4923]: I1128 11:09:48.074122 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:48 crc kubenswrapper[4923]: I1128 11:09:48.074132 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:48 crc kubenswrapper[4923]: I1128 11:09:48.074142 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:48 crc kubenswrapper[4923]: I1128 11:09:48.074149 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:48Z","lastTransitionTime":"2025-11-28T11:09:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:48 crc kubenswrapper[4923]: I1128 11:09:48.141704 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-h5s2m_84374038-67ce-4dc0-a2c2-6eed9650c604/kube-multus/0.log" Nov 28 11:09:48 crc kubenswrapper[4923]: I1128 11:09:48.141780 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-h5s2m" event={"ID":"84374038-67ce-4dc0-a2c2-6eed9650c604","Type":"ContainerStarted","Data":"53821c93696c6770adcfbe02308f05bdb9635578bd1dfa8d3201ecf94fa8b37c"} Nov 28 11:09:48 crc kubenswrapper[4923]: I1128 11:09:48.162034 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3af1089a-5262-4fa0-85fb-9f992ee6274d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://403d762c4ba4c4f3309ef1b447be25f7882da8a2d03b9376711063165438294f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3513d0400c621295e074b54a00fe7f284c38bebd8e7f11315db91fef9a2a4693\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://81443f6c4751860dce1d5ecf0f867a1c9641a989cbfd171e71de418f738108c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81a3db2980eccec7427b48074b3314c31b8471001076f7a7d9cfae435564097e\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://81a3db2980eccec7427b48074b3314c31b8471001076f7a7d9cfae435564097e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:48Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:48 crc kubenswrapper[4923]: I1128 11:09:48.168102 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 11:09:48 crc kubenswrapper[4923]: I1128 11:09:48.168123 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 11:09:48 crc kubenswrapper[4923]: I1128 11:09:48.168177 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 11:09:48 crc kubenswrapper[4923]: E1128 11:09:48.168200 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 11:09:48 crc kubenswrapper[4923]: E1128 11:09:48.168326 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 11:09:48 crc kubenswrapper[4923]: E1128 11:09:48.168366 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 11:09:48 crc kubenswrapper[4923]: I1128 11:09:48.176474 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:48 crc kubenswrapper[4923]: I1128 11:09:48.176512 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:48 crc kubenswrapper[4923]: I1128 11:09:48.176522 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:48 crc kubenswrapper[4923]: I1128 11:09:48.176537 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:48 crc kubenswrapper[4923]: I1128 11:09:48.176548 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:48Z","lastTransitionTime":"2025-11-28T11:09:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:48 crc kubenswrapper[4923]: I1128 11:09:48.181172 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-h5s2m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"84374038-67ce-4dc0-a2c2-6eed9650c604\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://53821c93696c6770adcfbe02308f05bdb9635578bd1dfa8d3201ecf94fa8b37c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://addcc8dd720a66b5089f7fa541a454de2be862cc524d1f8e4c948059ef70e20f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T11:09:46Z\\\",\\\"message\\\":\\\"2025-11-28T11:09:01+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_586be321-5a1b-4320-8bdd-14f453eec838\\\\n2025-11-28T11:09:01+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_586be321-5a1b-4320-8bdd-14f453eec838 to /host/opt/cni/bin/\\\\n2025-11-28T11:09:01Z [verbose] multus-daemon started\\\\n2025-11-28T11:09:01Z [verbose] Readiness Indicator file check\\\\n2025-11-28T11:09:46Z [error] have 
you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8z7ts\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-h5s2m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:48Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:48 crc kubenswrapper[4923]: I1128 11:09:48.195143 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"faf07f1a-1aa1-4e4a-b93d-739f0a9f1012\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f7b3757e1d1a5295909db644a475e35e9f9826cd7382a5a3eba86b4a76ac04d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f83e92b35264fccdd516d857e5a574a7156f7615b643691b6f8694daa38089b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8841f44f1d4af0e73960ce1c7ac5a4da352f85f6b3637315faa716d853be3277\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc960423fd7ee0a6231020982f5b932a6a2d7d0515d6f6df503d6c5d51b82096\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:48Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:48 crc kubenswrapper[4923]: I1128 11:09:48.211105 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c1e1dcf5efd54a3e3546460813ddc68dae027e669a19eeef6af7246b385ed21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:48Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:48 crc kubenswrapper[4923]: I1128 11:09:48.230157 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:48Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:48 crc kubenswrapper[4923]: I1128 11:09:48.246914 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:48Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:48 crc kubenswrapper[4923]: I1128 11:09:48.265980 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:48Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:48 crc kubenswrapper[4923]: I1128 11:09:48.276532 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"092566f7-fc7d-4897-a1f2-4ecedcd3058e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5e3ad6f76cbc3a3e771dc55c8711f153c18c1c96798a89e0f20b1ff06041129c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nthb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e0494fbf37786a6c8b1524ab2642c29343c3cfef308a6f0988d59f375d732a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\
\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nthb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-bwdth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:48Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:48 crc kubenswrapper[4923]: I1128 11:09:48.278920 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:48 crc kubenswrapper[4923]: I1128 11:09:48.278977 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:48 crc kubenswrapper[4923]: I1128 11:09:48.278989 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:48 crc kubenswrapper[4923]: I1128 11:09:48.279007 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:48 crc kubenswrapper[4923]: I1128 11:09:48.279019 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:48Z","lastTransitionTime":"2025-11-28T11:09:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:48 crc kubenswrapper[4923]: I1128 11:09:48.293625 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ee3c047cb59b98c8394618e6194fc477b983a7039581951378c69698b307ee7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3c01dc5b138b3d245898dd4a01c5e81350afe6fabfe9e0333589cd9439d4017\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://88bb4ac52c4706ca3d80080efb31eff071b89651d1a474b4c0c11ed5559ee7a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7b206747c810fe48a3d4269cdf80dce693f2d075510aabb42ef2c6dbbea97e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7489bfb225a27d96b70124820fb1924580c08b3355ef948335f881d7646a8a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bc7c6e0b076f04ba7810c82578147a9a3af59d3393e8effb111c299583aa6de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://026b93efaab822fb3d6aee74b0b301389d90f99963ace4d988dc77173ba770ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://026b93efaab822fb3d6aee74b0b301389d90f99963ace4d988dc77173ba770ee\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T11:09:31Z\\\",\\\"message\\\":\\\"c771-4738-8709-09636387cb00}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {7e8bb06a-06a5-45bc-a752-26a17d322811}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:61897e97-c771-4738-8709-09636387cb00}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {c02bd945-d57b-49ff-9cd3-202ed3574b26}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.4 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {43933d5e-3c3b-4ff8-8926-04ac25de450e}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:nat Mutator:insert Value:{GoSet:[{GoUUID:43933d5e-3c3b-4ff8-8926-04ac25de450e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {e3c4661a-36a6-47f0-a6c0-a4ee741f2224}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1128 11:09:31.089339 6468 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1128 11:09:31.089404 6468 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to 
create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:30Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-68dth_openshift-ovn-kubernetes(08e03349-56fc-4b2d-93d3-cf2405a4b7ad)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c2e3f2c83ec1b586a9478fb8d23caccab36a0fe08a3f0907a7b0cb2e67af65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recurs
iveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-68dth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:48Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:48 crc kubenswrapper[4923]: I1128 11:09:48.307075 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-8klhg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b1f111d9-e2b2-44b9-9592-bc5d4fef01f0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69bb796e49d5ca00e472f027f1443316695a4e243faff1eec26bc13d67bbc60a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vq594\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f90a5608dca4e71887975960683dda08b1b5e01f598af251663a968bb7fe56c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vq594\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:13Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-8klhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:48Z is after 2025-08-24T17:21:41Z" Nov 28 
11:09:48 crc kubenswrapper[4923]: I1128 11:09:48.320491 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c83fada-ddb5-4acd-99c4-74d9f42e6250\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eece6b2154126c64202c6cb5a8b2953275ed2dc75e76fef6aaf2c4b82a1979f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28093276aebb4751d979649c4ced86f500308d0d4dde397771c0e1e968250ec8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28ae91e6197ea506c337abdbce14a048856e6bda9b35c5de922904c26bc96a54\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\
\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb7df64556e877b9dd56be5e97103abc8aa8b28a43b4a5389d0f6e2489057cf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc06f87c8ea0744810e2b9cb7ff8bb529fc1b2133ab79d12eb8e6129accd3e18\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"-12-28 11:08:43 +0000 UTC (now=2025-11-28 11:08:59.275700323 +0000 UTC))\\\\\\\"\\\\nI1128 11:08:59.275749 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1128 11:08:59.275786 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1128 11:08:59.275797 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI1128 11:08:59.275809 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1128 11:08:59.275835 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1128 11:08:59.275852 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764328134\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764328133\\\\\\\\\\\\\\\" (2025-11-28 10:08:53 +0000 UTC to 2026-11-28 10:08:53 +0000 UTC (now=2025-11-28 11:08:59.275832266 +0000 UTC))\\\\\\\"\\\\nI1128 11:08:59.275869 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1128 11:08:59.275889 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1128 11:08:59.275902 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1128 11:08:59.275909 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1128 11:08:59.275921 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1128 11:08:59.275909 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2723273528/tls.crt::/tmp/serving-cert-2723273528/tls.key\\\\\\\"\\\\nF1128 11:08:59.278169 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:43Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c6f085f1fd5a1ed6abe0727d6a94c95fb1b97a9f00a0dc157f62f68698c25ba9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:48Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:48 crc kubenswrapper[4923]: I1128 11:09:48.334751 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-766k2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"69fcf39a-3416-4733-a55a-043d5286f8ac\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14683c7234bd497157ffe1097cd1eee097e5dd0a9e53a3e39813bc75890961b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dnr6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-766k2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:48Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:48 crc kubenswrapper[4923]: I1128 11:09:48.350086 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdcd87eab93f0216a48bbd6038ca2bc510b7b36f895bf66de15084be62a9a0e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa3a1d3e4297edce49cfd44925fbd1cb0d51752581df9a406042cc1da6f87121\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:48Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:48 crc kubenswrapper[4923]: I1128 11:09:48.361745 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0d288688a32f135820030d0816b0e9567100a4732e99c41c8b7f05374c8251f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:48Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:48 crc kubenswrapper[4923]: I1128 11:09:48.381967 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:48 crc kubenswrapper[4923]: I1128 11:09:48.382004 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:48 crc kubenswrapper[4923]: I1128 11:09:48.382040 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:48 crc kubenswrapper[4923]: I1128 11:09:48.382059 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:48 crc kubenswrapper[4923]: I1128 11:09:48.382071 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:48Z","lastTransitionTime":"2025-11-28T11:09:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:48 crc kubenswrapper[4923]: I1128 11:09:48.384623 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-9gjj9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b5d7899933378350cf0b863d44216aa3d87b7343f144dcab3470ee44370de0a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27143610133e2bc3e2aa453a394a9f65fcdeb97a45221a239dd490029e5a3184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://27143610133e2bc3e2aa453a394a9f65fcdeb97a45221a239dd490029e5a3184\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79f89c182f50622044f3978965cb214c601f6de4cddc96eaa118f532b2864276\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://79f89c182f50622044f3978965cb214c601f6de4cddc96eaa118f532b2864276\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7996a8b1d06ca35a2ee6c89edc2eaa7e45a6084ab54ff0caaa091c763d3cd47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b7996a8b1d06ca35a2ee6c89edc2eaa7e45a6084ab54ff0caaa091c763d3cd47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62d8385e1aa47815f9084d28d70dae899c80019ce59f5725455c594a31c97f22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62d8385e1aa47815f9084d28d70dae899c80019ce59f5725455c594a31c97f22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f6b2e1bc9f8f538d0973d9b1726d2c105d61fcd559df3ab8a2ec77b2d8f44a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f6b2e1bc9f8f538d0973d9b1726d2c105d61fcd559df3ab8a2ec77b2d8f44a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a368daf98912d176b66d5aba37e5e91937fbee8c7bd7ce6658993668c8e1525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9a368daf98912d176b66d5aba37e5e91937fbee8c7bd7ce6658993668c8e1525\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-9gjj9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:48Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:48 crc kubenswrapper[4923]: I1128 11:09:48.393648 4923 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-image-registry/node-ca-9qvkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf32d1c9-4639-48a9-b972-c9ad6daec543\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee259c68571ed9e58d29ab09558dea3cdcc89ebfb898d6f27e896cb0d80665bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnwc6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:03Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9qvkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:48Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:48 crc kubenswrapper[4923]: I1128 11:09:48.407153 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-g2kmb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b483d037-b692-45d5-bb83-02e029649100\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmpxf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmpxf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:14Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-g2kmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:48Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:48 crc kubenswrapper[4923]: I1128 11:09:48.483721 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:48 crc kubenswrapper[4923]: I1128 11:09:48.483749 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:48 crc kubenswrapper[4923]: I1128 11:09:48.483759 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Nov 28 11:09:48 crc kubenswrapper[4923]: I1128 11:09:48.483771 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:48 crc kubenswrapper[4923]: I1128 11:09:48.483780 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:48Z","lastTransitionTime":"2025-11-28T11:09:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:48 crc kubenswrapper[4923]: I1128 11:09:48.586647 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:48 crc kubenswrapper[4923]: I1128 11:09:48.586676 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:48 crc kubenswrapper[4923]: I1128 11:09:48.586684 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:48 crc kubenswrapper[4923]: I1128 11:09:48.586704 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:48 crc kubenswrapper[4923]: I1128 11:09:48.586714 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:48Z","lastTransitionTime":"2025-11-28T11:09:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:48 crc kubenswrapper[4923]: I1128 11:09:48.688647 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:48 crc kubenswrapper[4923]: I1128 11:09:48.688763 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:48 crc kubenswrapper[4923]: I1128 11:09:48.688783 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:48 crc kubenswrapper[4923]: I1128 11:09:48.688806 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:48 crc kubenswrapper[4923]: I1128 11:09:48.688827 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:48Z","lastTransitionTime":"2025-11-28T11:09:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:48 crc kubenswrapper[4923]: I1128 11:09:48.791661 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:48 crc kubenswrapper[4923]: I1128 11:09:48.791755 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:48 crc kubenswrapper[4923]: I1128 11:09:48.791778 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:48 crc kubenswrapper[4923]: I1128 11:09:48.791805 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:48 crc kubenswrapper[4923]: I1128 11:09:48.791826 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:48Z","lastTransitionTime":"2025-11-28T11:09:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:48 crc kubenswrapper[4923]: I1128 11:09:48.895204 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:48 crc kubenswrapper[4923]: I1128 11:09:48.895241 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:48 crc kubenswrapper[4923]: I1128 11:09:48.895251 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:48 crc kubenswrapper[4923]: I1128 11:09:48.895266 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:48 crc kubenswrapper[4923]: I1128 11:09:48.895275 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:48Z","lastTransitionTime":"2025-11-28T11:09:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:48 crc kubenswrapper[4923]: I1128 11:09:48.997397 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:48 crc kubenswrapper[4923]: I1128 11:09:48.997431 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:48 crc kubenswrapper[4923]: I1128 11:09:48.997439 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:48 crc kubenswrapper[4923]: I1128 11:09:48.997451 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:48 crc kubenswrapper[4923]: I1128 11:09:48.997459 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:48Z","lastTransitionTime":"2025-11-28T11:09:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:49 crc kubenswrapper[4923]: I1128 11:09:49.099635 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:49 crc kubenswrapper[4923]: I1128 11:09:49.099694 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:49 crc kubenswrapper[4923]: I1128 11:09:49.099712 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:49 crc kubenswrapper[4923]: I1128 11:09:49.099739 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:49 crc kubenswrapper[4923]: I1128 11:09:49.099755 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:49Z","lastTransitionTime":"2025-11-28T11:09:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:49 crc kubenswrapper[4923]: I1128 11:09:49.168533 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-g2kmb" Nov 28 11:09:49 crc kubenswrapper[4923]: E1128 11:09:49.168754 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-g2kmb" podUID="b483d037-b692-45d5-bb83-02e029649100" Nov 28 11:09:49 crc kubenswrapper[4923]: I1128 11:09:49.202142 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:49 crc kubenswrapper[4923]: I1128 11:09:49.202192 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:49 crc kubenswrapper[4923]: I1128 11:09:49.202202 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:49 crc kubenswrapper[4923]: I1128 11:09:49.202215 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:49 crc kubenswrapper[4923]: I1128 11:09:49.202224 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:49Z","lastTransitionTime":"2025-11-28T11:09:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:49 crc kubenswrapper[4923]: I1128 11:09:49.305004 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:49 crc kubenswrapper[4923]: I1128 11:09:49.305055 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:49 crc kubenswrapper[4923]: I1128 11:09:49.305066 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:49 crc kubenswrapper[4923]: I1128 11:09:49.305080 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:49 crc kubenswrapper[4923]: I1128 11:09:49.305090 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:49Z","lastTransitionTime":"2025-11-28T11:09:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:49 crc kubenswrapper[4923]: I1128 11:09:49.406774 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:49 crc kubenswrapper[4923]: I1128 11:09:49.406808 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:49 crc kubenswrapper[4923]: I1128 11:09:49.406818 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:49 crc kubenswrapper[4923]: I1128 11:09:49.406833 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:49 crc kubenswrapper[4923]: I1128 11:09:49.406843 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:49Z","lastTransitionTime":"2025-11-28T11:09:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:49 crc kubenswrapper[4923]: I1128 11:09:49.509250 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:49 crc kubenswrapper[4923]: I1128 11:09:49.509290 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:49 crc kubenswrapper[4923]: I1128 11:09:49.509303 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:49 crc kubenswrapper[4923]: I1128 11:09:49.509320 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:49 crc kubenswrapper[4923]: I1128 11:09:49.509330 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:49Z","lastTransitionTime":"2025-11-28T11:09:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:49 crc kubenswrapper[4923]: I1128 11:09:49.612183 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:49 crc kubenswrapper[4923]: I1128 11:09:49.612246 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:49 crc kubenswrapper[4923]: I1128 11:09:49.612265 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:49 crc kubenswrapper[4923]: I1128 11:09:49.612293 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:49 crc kubenswrapper[4923]: I1128 11:09:49.612315 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:49Z","lastTransitionTime":"2025-11-28T11:09:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:49 crc kubenswrapper[4923]: I1128 11:09:49.714819 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:49 crc kubenswrapper[4923]: I1128 11:09:49.714880 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:49 crc kubenswrapper[4923]: I1128 11:09:49.714900 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:49 crc kubenswrapper[4923]: I1128 11:09:49.714924 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:49 crc kubenswrapper[4923]: I1128 11:09:49.714973 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:49Z","lastTransitionTime":"2025-11-28T11:09:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:49 crc kubenswrapper[4923]: I1128 11:09:49.817842 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:49 crc kubenswrapper[4923]: I1128 11:09:49.817905 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:49 crc kubenswrapper[4923]: I1128 11:09:49.817924 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:49 crc kubenswrapper[4923]: I1128 11:09:49.817977 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:49 crc kubenswrapper[4923]: I1128 11:09:49.818000 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:49Z","lastTransitionTime":"2025-11-28T11:09:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:49 crc kubenswrapper[4923]: I1128 11:09:49.920792 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:49 crc kubenswrapper[4923]: I1128 11:09:49.920858 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:49 crc kubenswrapper[4923]: I1128 11:09:49.920876 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:49 crc kubenswrapper[4923]: I1128 11:09:49.920901 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:49 crc kubenswrapper[4923]: I1128 11:09:49.920918 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:49Z","lastTransitionTime":"2025-11-28T11:09:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:50 crc kubenswrapper[4923]: I1128 11:09:50.024526 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:50 crc kubenswrapper[4923]: I1128 11:09:50.024604 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:50 crc kubenswrapper[4923]: I1128 11:09:50.024623 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:50 crc kubenswrapper[4923]: I1128 11:09:50.024650 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:50 crc kubenswrapper[4923]: I1128 11:09:50.024668 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:50Z","lastTransitionTime":"2025-11-28T11:09:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:50 crc kubenswrapper[4923]: I1128 11:09:50.126408 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:50 crc kubenswrapper[4923]: I1128 11:09:50.126473 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:50 crc kubenswrapper[4923]: I1128 11:09:50.126490 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:50 crc kubenswrapper[4923]: I1128 11:09:50.126516 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:50 crc kubenswrapper[4923]: I1128 11:09:50.126535 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:50Z","lastTransitionTime":"2025-11-28T11:09:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:50 crc kubenswrapper[4923]: I1128 11:09:50.167765 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 11:09:50 crc kubenswrapper[4923]: I1128 11:09:50.167820 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 11:09:50 crc kubenswrapper[4923]: I1128 11:09:50.167878 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 11:09:50 crc kubenswrapper[4923]: E1128 11:09:50.167980 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 11:09:50 crc kubenswrapper[4923]: E1128 11:09:50.168078 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 11:09:50 crc kubenswrapper[4923]: E1128 11:09:50.168186 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 11:09:50 crc kubenswrapper[4923]: I1128 11:09:50.229017 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:50 crc kubenswrapper[4923]: I1128 11:09:50.229067 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:50 crc kubenswrapper[4923]: I1128 11:09:50.229084 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:50 crc kubenswrapper[4923]: I1128 11:09:50.229107 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:50 crc kubenswrapper[4923]: I1128 11:09:50.229125 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:50Z","lastTransitionTime":"2025-11-28T11:09:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:50 crc kubenswrapper[4923]: I1128 11:09:50.331660 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:50 crc kubenswrapper[4923]: I1128 11:09:50.331774 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:50 crc kubenswrapper[4923]: I1128 11:09:50.331793 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:50 crc kubenswrapper[4923]: I1128 11:09:50.331817 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:50 crc kubenswrapper[4923]: I1128 11:09:50.331834 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:50Z","lastTransitionTime":"2025-11-28T11:09:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:50 crc kubenswrapper[4923]: I1128 11:09:50.434824 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:50 crc kubenswrapper[4923]: I1128 11:09:50.434865 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:50 crc kubenswrapper[4923]: I1128 11:09:50.434909 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:50 crc kubenswrapper[4923]: I1128 11:09:50.434957 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:50 crc kubenswrapper[4923]: I1128 11:09:50.434975 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:50Z","lastTransitionTime":"2025-11-28T11:09:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:50 crc kubenswrapper[4923]: I1128 11:09:50.537289 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:50 crc kubenswrapper[4923]: I1128 11:09:50.537336 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:50 crc kubenswrapper[4923]: I1128 11:09:50.537354 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:50 crc kubenswrapper[4923]: I1128 11:09:50.537376 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:50 crc kubenswrapper[4923]: I1128 11:09:50.537392 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:50Z","lastTransitionTime":"2025-11-28T11:09:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:50 crc kubenswrapper[4923]: I1128 11:09:50.639268 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:50 crc kubenswrapper[4923]: I1128 11:09:50.639325 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:50 crc kubenswrapper[4923]: I1128 11:09:50.639341 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:50 crc kubenswrapper[4923]: I1128 11:09:50.639364 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:50 crc kubenswrapper[4923]: I1128 11:09:50.639381 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:50Z","lastTransitionTime":"2025-11-28T11:09:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:50 crc kubenswrapper[4923]: I1128 11:09:50.736252 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:50 crc kubenswrapper[4923]: I1128 11:09:50.736285 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:50 crc kubenswrapper[4923]: I1128 11:09:50.736295 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:50 crc kubenswrapper[4923]: I1128 11:09:50.736307 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:50 crc kubenswrapper[4923]: I1128 11:09:50.736317 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:50Z","lastTransitionTime":"2025-11-28T11:09:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:50 crc kubenswrapper[4923]: E1128 11:09:50.756182 4923 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:50Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:50Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f69ffe27-00d5-45aa-bb63-00075a21e0c7\\\",\\\"systemUUID\\\":\\\"bb6b4e53-d23a-4517-9d50-b05bdc3da8e4\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:50Z is after 
2025-08-24T17:21:41Z" Nov 28 11:09:50 crc kubenswrapper[4923]: I1128 11:09:50.759848 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:50 crc kubenswrapper[4923]: I1128 11:09:50.759875 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:50 crc kubenswrapper[4923]: I1128 11:09:50.759902 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:50 crc kubenswrapper[4923]: I1128 11:09:50.759916 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:50 crc kubenswrapper[4923]: I1128 11:09:50.759925 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:50Z","lastTransitionTime":"2025-11-28T11:09:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:50 crc kubenswrapper[4923]: E1128 11:09:50.775393 4923 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:50Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:50Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f69ffe27-00d5-45aa-bb63-00075a21e0c7\\\",\\\"systemUUID\\\":\\\"bb6b4e53-d23a-4517-9d50-b05bdc3da8e4\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:50Z is after 
2025-08-24T17:21:41Z" Nov 28 11:09:50 crc kubenswrapper[4923]: I1128 11:09:50.778550 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:50 crc kubenswrapper[4923]: I1128 11:09:50.778602 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:50 crc kubenswrapper[4923]: I1128 11:09:50.778620 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:50 crc kubenswrapper[4923]: I1128 11:09:50.778645 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:50 crc kubenswrapper[4923]: I1128 11:09:50.778662 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:50Z","lastTransitionTime":"2025-11-28T11:09:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:50 crc kubenswrapper[4923]: E1128 11:09:50.798655 4923 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:50Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:50Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f69ffe27-00d5-45aa-bb63-00075a21e0c7\\\",\\\"systemUUID\\\":\\\"bb6b4e53-d23a-4517-9d50-b05bdc3da8e4\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:50Z is after 
2025-08-24T17:21:41Z" Nov 28 11:09:50 crc kubenswrapper[4923]: I1128 11:09:50.802574 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:50 crc kubenswrapper[4923]: I1128 11:09:50.802609 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:50 crc kubenswrapper[4923]: I1128 11:09:50.802617 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:50 crc kubenswrapper[4923]: I1128 11:09:50.802632 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:50 crc kubenswrapper[4923]: I1128 11:09:50.802641 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:50Z","lastTransitionTime":"2025-11-28T11:09:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:50 crc kubenswrapper[4923]: E1128 11:09:50.821103 4923 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:50Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:50Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f69ffe27-00d5-45aa-bb63-00075a21e0c7\\\",\\\"systemUUID\\\":\\\"bb6b4e53-d23a-4517-9d50-b05bdc3da8e4\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:50Z is after 
2025-08-24T17:21:41Z" Nov 28 11:09:50 crc kubenswrapper[4923]: I1128 11:09:50.823967 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:50 crc kubenswrapper[4923]: I1128 11:09:50.824014 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:50 crc kubenswrapper[4923]: I1128 11:09:50.824032 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:50 crc kubenswrapper[4923]: I1128 11:09:50.824053 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:50 crc kubenswrapper[4923]: I1128 11:09:50.824069 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:50Z","lastTransitionTime":"2025-11-28T11:09:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:50 crc kubenswrapper[4923]: E1128 11:09:50.843951 4923 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:50Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:09:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:50Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f69ffe27-00d5-45aa-bb63-00075a21e0c7\\\",\\\"systemUUID\\\":\\\"bb6b4e53-d23a-4517-9d50-b05bdc3da8e4\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:50Z is after 
2025-08-24T17:21:41Z" Nov 28 11:09:50 crc kubenswrapper[4923]: E1128 11:09:50.844069 4923 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 28 11:09:50 crc kubenswrapper[4923]: I1128 11:09:50.845455 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:50 crc kubenswrapper[4923]: I1128 11:09:50.845483 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:50 crc kubenswrapper[4923]: I1128 11:09:50.845490 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:50 crc kubenswrapper[4923]: I1128 11:09:50.845504 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:50 crc kubenswrapper[4923]: I1128 11:09:50.845513 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:50Z","lastTransitionTime":"2025-11-28T11:09:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:50 crc kubenswrapper[4923]: I1128 11:09:50.947793 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:50 crc kubenswrapper[4923]: I1128 11:09:50.947829 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:50 crc kubenswrapper[4923]: I1128 11:09:50.947839 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:50 crc kubenswrapper[4923]: I1128 11:09:50.947855 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:50 crc kubenswrapper[4923]: I1128 11:09:50.947867 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:50Z","lastTransitionTime":"2025-11-28T11:09:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 28 11:09:51 crc kubenswrapper[4923]: I1128 11:09:51.050466 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:51 crc kubenswrapper[4923]: I1128 11:09:51.050513 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:51 crc kubenswrapper[4923]: I1128 11:09:51.050523 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:51 crc kubenswrapper[4923]: I1128 11:09:51.050538 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:51 crc kubenswrapper[4923]: I1128 11:09:51.050549 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:51Z","lastTransitionTime":"2025-11-28T11:09:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:51 crc kubenswrapper[4923]: I1128 11:09:51.168369 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-g2kmb" Nov 28 11:09:51 crc kubenswrapper[4923]: E1128 11:09:51.168568 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
pod="openshift-multus/network-metrics-daemon-g2kmb" podUID="b483d037-b692-45d5-bb83-02e029649100" Nov 28 11:09:51 crc kubenswrapper[4923]: I1128 11:09:51.182998 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-766k2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69fcf39a-3416-4733-a55a-043d5286f8ac\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14683c7234bd497157ffe1097cd1eee097e5dd0a9e53a3e39813bc75890961b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dnr6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-766k2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:51Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:51 crc kubenswrapper[4923]: I1128 11:09:51.196766 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-g2kmb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b483d037-b692-45d5-bb83-02e029649100\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmpxf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmpxf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:14Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-g2kmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:51Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:51 crc kubenswrapper[4923]: I1128 11:09:51.215085 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdcd87eab93f0216a48bbd6038ca2bc510b7b36f895bf66de15084be62a9a0e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa3a1d3e4297edce49cfd44925fbd1cb0d51752581df9a406042cc1da6f87121\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:51Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:51 crc kubenswrapper[4923]: I1128 11:09:51.229768 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0d288688a32f135820030d0816b0e9567100a4732e99c41c8b7f05374c8251f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:51Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:51 crc kubenswrapper[4923]: I1128 11:09:51.241333 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-9gjj9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b5d7899933378350cf0b863d44216aa3d87b7343f144dcab3470ee44370de0a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27143610133e2bc3e2aa453a394a9f65fcdeb97a45221a239dd490029e5a3184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://27143610133e2bc3e2aa453a394a9f65fcdeb97a45221a239dd490029e5a3184\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79f89c182f50622044f3978965cb214c601f6de4cddc96eaa118f532b2864276\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://79f89c182f50622044f3978965cb214c601f6de4cddc96eaa118f532b2864276\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7996a8b1d06ca35a2ee6c89edc2eaa7e45a6084ab54ff0caaa091c763d3cd47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b7996a8b1d06ca35a2ee6c89edc2eaa7e45a6084ab54ff0caaa091c763d3cd47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62d8385e1aa47815f9084d28d70dae899c80019ce59f5725455c594a31c97f22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62d8385e1aa47815f9084d28d70dae899c80019ce59f5725455c594a31c97f22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f6b2e1bc9f8f538d0973d9b1726d2c105d61fcd559df3ab8a2ec77b2d8f44a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f6b2e1bc9f8f538d0973d9b1726d2c105d61fcd559df3ab8a2ec77b2d8f44a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a368daf98912d176b66d5aba37e5e91937fbee8c7bd7ce6658993668c8e1525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9a368daf98912d176b66d5aba37e5e91937fbee8c7bd7ce6658993668c8e1525\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-9gjj9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:51Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:51 crc kubenswrapper[4923]: I1128 11:09:51.251274 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9qvkm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf32d1c9-4639-48a9-b972-c9ad6daec543\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee259c68571ed9e58d29ab09558dea3cdcc89ebfb898d6f27e896cb0d80665bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnwc6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:03Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9qvkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:51Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:51 crc kubenswrapper[4923]: I1128 11:09:51.255001 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:51 crc kubenswrapper[4923]: I1128 11:09:51.255021 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:51 crc kubenswrapper[4923]: I1128 11:09:51.255030 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:51 crc kubenswrapper[4923]: I1128 11:09:51.255043 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:51 crc kubenswrapper[4923]: I1128 11:09:51.255052 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:51Z","lastTransitionTime":"2025-11-28T11:09:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:51 crc kubenswrapper[4923]: I1128 11:09:51.272560 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"faf07f1a-1aa1-4e4a-b93d-739f0a9f1012\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f7b3757e1d1a5295909db644a475e35e9f9826cd7382a5a3eba86b4a76ac04d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f83e92b35264fccdd516d857e5a574a7156f7615b643691b6f8694daa38089b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8841f44f1d4af0e73960ce1c7ac5a4da352f85f6b3637315faa716d853be3277\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42
Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc960423fd7ee0a6231020982f5b932a6a2d7d0515d6f6df503d6c5d51b82096\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:51Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:51 crc kubenswrapper[4923]: I1128 11:09:51.285699 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3af1089a-5262-4fa0-85fb-9f992ee6274d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://403d762c4ba4c4f3309ef1b447be25f7882da8a2d03b9376711063165438294f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3513d0400c621295e074b54a00fe7f284c38bebd8e7f11315db91fef9a2a4693\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://81443f6c4751860dce1d5ecf0f867a1c9641a989cbfd171e71de418f738108c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81a3db2980eccec7427b48074b3314c31b8471001076f7a7d9cfae435564097e\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://81a3db2980eccec7427b48074b3314c31b8471001076f7a7d9cfae435564097e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:51Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:51 crc kubenswrapper[4923]: I1128 11:09:51.298751 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-h5s2m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"84374038-67ce-4dc0-a2c2-6eed9650c604\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://53821c93696c6770adcfbe02308f05bdb9635578bd1dfa8d3201ecf94fa8b37c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://addcc8dd720a66b5089f7fa541a454de2be862cc524d1f8e4c948059ef70e20f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T11:09:46Z\\\",\\\"message\\\":\\\"2025-11-28T11:09:01+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_586be321-5a1b-4320-8bdd-14f453eec838\\\\n2025-11-28T11:09:01+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_586be321-5a1b-4320-8bdd-14f453eec838 to /host/opt/cni/bin/\\\\n2025-11-28T11:09:01Z [verbose] multus-daemon started\\\\n2025-11-28T11:09:01Z [verbose] Readiness Indicator file check\\\\n2025-11-28T11:09:46Z [error] have you checked that your default network is ready? 
still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8z7ts\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-h5s2m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:51Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:51 crc kubenswrapper[4923]: I1128 11:09:51.315488 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ee3c047cb59b98c8394618e6194fc477b983a7039581951378c69698b307ee7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3c01dc5b138b3d245898dd4a01c5e81350afe6fabfe9e0333589cd9439d4017\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88bb4ac52c4706ca3d80080efb31eff071b89651d1a474b4c0c11ed5559ee7a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7b206747c810fe48a3d4269cdf80dce693f2d075510aabb42ef2c6dbbea97e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7489bfb225a27d96b70124820fb1924580c08b3355ef948335f881d7646a8a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bc7c6e0b076f04ba7810c82578147a9a3af59d3393e8effb111c299583aa6de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://026b93efaab822fb3d6aee74b0b301389d90f99963ace4d988dc77173ba770ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://026b93efaab822fb3d6aee74b0b301389d90f99963ace4d988dc77173ba770ee\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T11:09:31Z\\\",\\\"message\\\":\\\"c771-4738-8709-09636387cb00}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {7e8bb06a-06a5-45bc-a752-26a17d322811}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:61897e97-c771-4738-8709-09636387cb00}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {c02bd945-d57b-49ff-9cd3-202ed3574b26}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.4 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {43933d5e-3c3b-4ff8-8926-04ac25de450e}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:nat Mutator:insert Value:{GoSet:[{GoUUID:43933d5e-3c3b-4ff8-8926-04ac25de450e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {e3c4661a-36a6-47f0-a6c0-a4ee741f2224}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1128 11:09:31.089339 6468 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1128 11:09:31.089404 6468 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:30Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-68dth_openshift-ovn-kubernetes(08e03349-56fc-4b2d-93d3-cf2405a4b7ad)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c2e3f2c83ec1b586a9478fb8d23caccab36a0fe08a3f0907a7b0cb2e67af65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-68dth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:51Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:51 crc kubenswrapper[4923]: I1128 11:09:51.327972 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-8klhg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b1f111d9-e2b2-44b9-9592-bc5d4fef01f0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69bb796e49d5ca00e472f027f1443316695a4e243faff1eec26bc13d67bbc60a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vq594
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f90a5608dca4e71887975960683dda08b1b5e01f598af251663a968bb7fe56c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vq594\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:13Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-8klhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:51Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:51 crc kubenswrapper[4923]: I1128 11:09:51.344844 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c83fada-ddb5-4acd-99c4-74d9f42e6250\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eece6b2154126c64202c6cb5a8b2953275ed2dc75e76fef6aaf2c4b82a1979f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28093276aebb4751d979649c4ced86f500308d0d4dde397771c0e1e968250ec8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28ae91e6197ea506c337abdbce14a048856e6bda9b35c5de922904c26bc96a54\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb7df64556e877b9dd56be5e97103abc8aa8b28a43b4a5389d0f6e2489057cf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc06f87c8ea0744810e2b9cb7ff8bb529fc1b2133ab79d12eb8e6129accd3e18\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"-12-28 11:08:43 +0000 UTC (now=2025-11-28 11:08:59.275700323 +0000 UTC))\\\\\\\"\\\\nI1128 11:08:59.275749 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1128 11:08:59.275786 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1128 11:08:59.275797 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI1128 11:08:59.275809 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1128 11:08:59.275835 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1128 11:08:59.275852 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764328134\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764328133\\\\\\\\\\\\\\\" (2025-11-28 10:08:53 +0000 UTC to 2026-11-28 10:08:53 +0000 UTC (now=2025-11-28 11:08:59.275832266 +0000 UTC))\\\\\\\"\\\\nI1128 11:08:59.275869 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1128 11:08:59.275889 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1128 11:08:59.275902 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1128 11:08:59.275909 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1128 11:08:59.275921 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1128 11:08:59.275909 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2723273528/tls.crt::/tmp/serving-cert-2723273528/tls.key\\\\\\\"\\\\nF1128 11:08:59.278169 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:43Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c6f085f1fd5a1ed6abe0727d6a94c95fb1b97a9f00a0dc157f62f68698c25ba9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:51Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:51 crc kubenswrapper[4923]: I1128 11:09:51.356285 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:51 crc kubenswrapper[4923]: I1128 11:09:51.356328 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:51 crc kubenswrapper[4923]: I1128 11:09:51.356338 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:51 crc kubenswrapper[4923]: I1128 11:09:51.356353 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:51 crc kubenswrapper[4923]: I1128 11:09:51.356362 4923 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:51Z","lastTransitionTime":"2025-11-28T11:09:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:51 crc kubenswrapper[4923]: I1128 11:09:51.362256 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c1e1dcf5efd54a3e3546460813ddc68dae027e669a19eeef6af7246b385ed21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:51Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:51 crc kubenswrapper[4923]: I1128 11:09:51.375506 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:51Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:51 crc kubenswrapper[4923]: I1128 11:09:51.391124 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:51Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:51 crc kubenswrapper[4923]: I1128 11:09:51.402136 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:51Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:51 crc kubenswrapper[4923]: I1128 11:09:51.411333 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"092566f7-fc7d-4897-a1f2-4ecedcd3058e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5e3ad6f76cbc3a3e771dc55c8711f153c18c1c96798a89e0f20b1ff06041129c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nthb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e0494fbf37786a6c8b1524ab2642c29343c3cfef308a6f0988d59f375d732a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\
\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nthb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-bwdth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:09:51Z is after 2025-08-24T17:21:41Z" Nov 28 11:09:51 crc kubenswrapper[4923]: I1128 11:09:51.458800 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:51 crc kubenswrapper[4923]: I1128 11:09:51.458834 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:51 crc kubenswrapper[4923]: I1128 11:09:51.458843 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:51 crc kubenswrapper[4923]: I1128 11:09:51.458858 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:51 crc kubenswrapper[4923]: I1128 11:09:51.458867 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:51Z","lastTransitionTime":"2025-11-28T11:09:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:51 crc kubenswrapper[4923]: I1128 11:09:51.562258 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:51 crc kubenswrapper[4923]: I1128 11:09:51.562293 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:51 crc kubenswrapper[4923]: I1128 11:09:51.562302 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:51 crc kubenswrapper[4923]: I1128 11:09:51.562314 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:51 crc kubenswrapper[4923]: I1128 11:09:51.562322 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:51Z","lastTransitionTime":"2025-11-28T11:09:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:51 crc kubenswrapper[4923]: I1128 11:09:51.665052 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:51 crc kubenswrapper[4923]: I1128 11:09:51.665104 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:51 crc kubenswrapper[4923]: I1128 11:09:51.665121 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:51 crc kubenswrapper[4923]: I1128 11:09:51.665141 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:51 crc kubenswrapper[4923]: I1128 11:09:51.665160 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:51Z","lastTransitionTime":"2025-11-28T11:09:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:51 crc kubenswrapper[4923]: I1128 11:09:51.767201 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:51 crc kubenswrapper[4923]: I1128 11:09:51.767479 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:51 crc kubenswrapper[4923]: I1128 11:09:51.767497 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:51 crc kubenswrapper[4923]: I1128 11:09:51.767524 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:51 crc kubenswrapper[4923]: I1128 11:09:51.767540 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:51Z","lastTransitionTime":"2025-11-28T11:09:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:51 crc kubenswrapper[4923]: I1128 11:09:51.869741 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:51 crc kubenswrapper[4923]: I1128 11:09:51.869804 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:51 crc kubenswrapper[4923]: I1128 11:09:51.869822 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:51 crc kubenswrapper[4923]: I1128 11:09:51.869846 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:51 crc kubenswrapper[4923]: I1128 11:09:51.869863 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:51Z","lastTransitionTime":"2025-11-28T11:09:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:51 crc kubenswrapper[4923]: I1128 11:09:51.971676 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:51 crc kubenswrapper[4923]: I1128 11:09:51.971714 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:51 crc kubenswrapper[4923]: I1128 11:09:51.971722 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:51 crc kubenswrapper[4923]: I1128 11:09:51.971736 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:51 crc kubenswrapper[4923]: I1128 11:09:51.971744 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:51Z","lastTransitionTime":"2025-11-28T11:09:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:52 crc kubenswrapper[4923]: I1128 11:09:52.080095 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:52 crc kubenswrapper[4923]: I1128 11:09:52.080149 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:52 crc kubenswrapper[4923]: I1128 11:09:52.080167 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:52 crc kubenswrapper[4923]: I1128 11:09:52.080192 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:52 crc kubenswrapper[4923]: I1128 11:09:52.080208 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:52Z","lastTransitionTime":"2025-11-28T11:09:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:52 crc kubenswrapper[4923]: I1128 11:09:52.168158 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 11:09:52 crc kubenswrapper[4923]: I1128 11:09:52.168214 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 11:09:52 crc kubenswrapper[4923]: I1128 11:09:52.168164 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 11:09:52 crc kubenswrapper[4923]: E1128 11:09:52.168329 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 11:09:52 crc kubenswrapper[4923]: E1128 11:09:52.168423 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 11:09:52 crc kubenswrapper[4923]: E1128 11:09:52.168522 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 11:09:52 crc kubenswrapper[4923]: I1128 11:09:52.183082 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:52 crc kubenswrapper[4923]: I1128 11:09:52.183126 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:52 crc kubenswrapper[4923]: I1128 11:09:52.183142 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:52 crc kubenswrapper[4923]: I1128 11:09:52.183164 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:52 crc kubenswrapper[4923]: I1128 11:09:52.183180 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:52Z","lastTransitionTime":"2025-11-28T11:09:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:52 crc kubenswrapper[4923]: I1128 11:09:52.285552 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:52 crc kubenswrapper[4923]: I1128 11:09:52.285587 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:52 crc kubenswrapper[4923]: I1128 11:09:52.285595 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:52 crc kubenswrapper[4923]: I1128 11:09:52.285609 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:52 crc kubenswrapper[4923]: I1128 11:09:52.285619 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:52Z","lastTransitionTime":"2025-11-28T11:09:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:52 crc kubenswrapper[4923]: I1128 11:09:52.388509 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:52 crc kubenswrapper[4923]: I1128 11:09:52.388701 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:52 crc kubenswrapper[4923]: I1128 11:09:52.388841 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:52 crc kubenswrapper[4923]: I1128 11:09:52.389027 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:52 crc kubenswrapper[4923]: I1128 11:09:52.389154 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:52Z","lastTransitionTime":"2025-11-28T11:09:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:52 crc kubenswrapper[4923]: I1128 11:09:52.491899 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:52 crc kubenswrapper[4923]: I1128 11:09:52.491957 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:52 crc kubenswrapper[4923]: I1128 11:09:52.491966 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:52 crc kubenswrapper[4923]: I1128 11:09:52.491982 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:52 crc kubenswrapper[4923]: I1128 11:09:52.491994 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:52Z","lastTransitionTime":"2025-11-28T11:09:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:52 crc kubenswrapper[4923]: I1128 11:09:52.595239 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:52 crc kubenswrapper[4923]: I1128 11:09:52.595333 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:52 crc kubenswrapper[4923]: I1128 11:09:52.595356 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:52 crc kubenswrapper[4923]: I1128 11:09:52.595381 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:52 crc kubenswrapper[4923]: I1128 11:09:52.595432 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:52Z","lastTransitionTime":"2025-11-28T11:09:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:52 crc kubenswrapper[4923]: I1128 11:09:52.698964 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:52 crc kubenswrapper[4923]: I1128 11:09:52.699786 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:52 crc kubenswrapper[4923]: I1128 11:09:52.699974 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:52 crc kubenswrapper[4923]: I1128 11:09:52.700161 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:52 crc kubenswrapper[4923]: I1128 11:09:52.700295 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:52Z","lastTransitionTime":"2025-11-28T11:09:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:52 crc kubenswrapper[4923]: I1128 11:09:52.803183 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:52 crc kubenswrapper[4923]: I1128 11:09:52.803231 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:52 crc kubenswrapper[4923]: I1128 11:09:52.803240 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:52 crc kubenswrapper[4923]: I1128 11:09:52.803255 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:52 crc kubenswrapper[4923]: I1128 11:09:52.803266 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:52Z","lastTransitionTime":"2025-11-28T11:09:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:52 crc kubenswrapper[4923]: I1128 11:09:52.907098 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:52 crc kubenswrapper[4923]: I1128 11:09:52.907504 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:52 crc kubenswrapper[4923]: I1128 11:09:52.907673 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:52 crc kubenswrapper[4923]: I1128 11:09:52.907817 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:52 crc kubenswrapper[4923]: I1128 11:09:52.907994 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:52Z","lastTransitionTime":"2025-11-28T11:09:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:53 crc kubenswrapper[4923]: I1128 11:09:53.011166 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:53 crc kubenswrapper[4923]: I1128 11:09:53.011241 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:53 crc kubenswrapper[4923]: I1128 11:09:53.011259 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:53 crc kubenswrapper[4923]: I1128 11:09:53.011288 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:53 crc kubenswrapper[4923]: I1128 11:09:53.011307 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:53Z","lastTransitionTime":"2025-11-28T11:09:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:53 crc kubenswrapper[4923]: I1128 11:09:53.114780 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:53 crc kubenswrapper[4923]: I1128 11:09:53.114855 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:53 crc kubenswrapper[4923]: I1128 11:09:53.114874 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:53 crc kubenswrapper[4923]: I1128 11:09:53.114903 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:53 crc kubenswrapper[4923]: I1128 11:09:53.114923 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:53Z","lastTransitionTime":"2025-11-28T11:09:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:53 crc kubenswrapper[4923]: I1128 11:09:53.168006 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-g2kmb" Nov 28 11:09:53 crc kubenswrapper[4923]: E1128 11:09:53.168511 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-g2kmb" podUID="b483d037-b692-45d5-bb83-02e029649100" Nov 28 11:09:53 crc kubenswrapper[4923]: I1128 11:09:53.218172 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:53 crc kubenswrapper[4923]: I1128 11:09:53.218224 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:53 crc kubenswrapper[4923]: I1128 11:09:53.218242 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:53 crc kubenswrapper[4923]: I1128 11:09:53.218265 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:53 crc kubenswrapper[4923]: I1128 11:09:53.218282 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:53Z","lastTransitionTime":"2025-11-28T11:09:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:53 crc kubenswrapper[4923]: I1128 11:09:53.321519 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:53 crc kubenswrapper[4923]: I1128 11:09:53.321567 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:53 crc kubenswrapper[4923]: I1128 11:09:53.321584 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:53 crc kubenswrapper[4923]: I1128 11:09:53.321607 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:53 crc kubenswrapper[4923]: I1128 11:09:53.321624 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:53Z","lastTransitionTime":"2025-11-28T11:09:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:53 crc kubenswrapper[4923]: I1128 11:09:53.423473 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:53 crc kubenswrapper[4923]: I1128 11:09:53.423509 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:53 crc kubenswrapper[4923]: I1128 11:09:53.423520 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:53 crc kubenswrapper[4923]: I1128 11:09:53.423534 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:53 crc kubenswrapper[4923]: I1128 11:09:53.423544 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:53Z","lastTransitionTime":"2025-11-28T11:09:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:53 crc kubenswrapper[4923]: I1128 11:09:53.526265 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:53 crc kubenswrapper[4923]: I1128 11:09:53.526743 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:53 crc kubenswrapper[4923]: I1128 11:09:53.526898 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:53 crc kubenswrapper[4923]: I1128 11:09:53.527073 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:53 crc kubenswrapper[4923]: I1128 11:09:53.527259 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:53Z","lastTransitionTime":"2025-11-28T11:09:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:53 crc kubenswrapper[4923]: I1128 11:09:53.629455 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:53 crc kubenswrapper[4923]: I1128 11:09:53.629812 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:53 crc kubenswrapper[4923]: I1128 11:09:53.629993 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:53 crc kubenswrapper[4923]: I1128 11:09:53.630192 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:53 crc kubenswrapper[4923]: I1128 11:09:53.630331 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:53Z","lastTransitionTime":"2025-11-28T11:09:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:53 crc kubenswrapper[4923]: I1128 11:09:53.734050 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:53 crc kubenswrapper[4923]: I1128 11:09:53.734089 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:53 crc kubenswrapper[4923]: I1128 11:09:53.734099 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:53 crc kubenswrapper[4923]: I1128 11:09:53.734122 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:53 crc kubenswrapper[4923]: I1128 11:09:53.734131 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:53Z","lastTransitionTime":"2025-11-28T11:09:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:53 crc kubenswrapper[4923]: I1128 11:09:53.836850 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:53 crc kubenswrapper[4923]: I1128 11:09:53.836909 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:53 crc kubenswrapper[4923]: I1128 11:09:53.836928 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:53 crc kubenswrapper[4923]: I1128 11:09:53.836987 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:53 crc kubenswrapper[4923]: I1128 11:09:53.837003 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:53Z","lastTransitionTime":"2025-11-28T11:09:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:53 crc kubenswrapper[4923]: I1128 11:09:53.940427 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:53 crc kubenswrapper[4923]: I1128 11:09:53.940483 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:53 crc kubenswrapper[4923]: I1128 11:09:53.940501 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:53 crc kubenswrapper[4923]: I1128 11:09:53.940524 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:53 crc kubenswrapper[4923]: I1128 11:09:53.940542 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:53Z","lastTransitionTime":"2025-11-28T11:09:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:54 crc kubenswrapper[4923]: I1128 11:09:54.043547 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:54 crc kubenswrapper[4923]: I1128 11:09:54.043614 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:54 crc kubenswrapper[4923]: I1128 11:09:54.043637 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:54 crc kubenswrapper[4923]: I1128 11:09:54.043671 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:54 crc kubenswrapper[4923]: I1128 11:09:54.043699 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:54Z","lastTransitionTime":"2025-11-28T11:09:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:54 crc kubenswrapper[4923]: I1128 11:09:54.146982 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:54 crc kubenswrapper[4923]: I1128 11:09:54.147043 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:54 crc kubenswrapper[4923]: I1128 11:09:54.147061 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:54 crc kubenswrapper[4923]: I1128 11:09:54.147087 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:54 crc kubenswrapper[4923]: I1128 11:09:54.147105 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:54Z","lastTransitionTime":"2025-11-28T11:09:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:54 crc kubenswrapper[4923]: I1128 11:09:54.168535 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 11:09:54 crc kubenswrapper[4923]: E1128 11:09:54.168689 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 11:09:54 crc kubenswrapper[4923]: I1128 11:09:54.168911 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 11:09:54 crc kubenswrapper[4923]: E1128 11:09:54.169043 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 11:09:54 crc kubenswrapper[4923]: I1128 11:09:54.169229 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 11:09:54 crc kubenswrapper[4923]: E1128 11:09:54.169318 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 11:09:54 crc kubenswrapper[4923]: I1128 11:09:54.249883 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:54 crc kubenswrapper[4923]: I1128 11:09:54.249917 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:54 crc kubenswrapper[4923]: I1128 11:09:54.249948 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:54 crc kubenswrapper[4923]: I1128 11:09:54.249966 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:54 crc kubenswrapper[4923]: I1128 11:09:54.249979 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:54Z","lastTransitionTime":"2025-11-28T11:09:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 28 11:09:55 crc kubenswrapper[4923]: I1128 11:09:55.168536 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-g2kmb"
Nov 28 11:09:55 crc kubenswrapper[4923]: E1128 11:09:55.168720 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-g2kmb" podUID="b483d037-b692-45d5-bb83-02e029649100"
Nov 28 11:09:56 crc kubenswrapper[4923]: I1128 11:09:56.167727 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 11:09:56 crc kubenswrapper[4923]: E1128 11:09:56.167910 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 11:09:56 crc kubenswrapper[4923]: I1128 11:09:56.167736 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 11:09:56 crc kubenswrapper[4923]: E1128 11:09:56.168225 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 11:09:56 crc kubenswrapper[4923]: I1128 11:09:56.168534 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 11:09:56 crc kubenswrapper[4923]: E1128 11:09:56.168893 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 11:09:57 crc kubenswrapper[4923]: I1128 11:09:57.168097 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-g2kmb"
Nov 28 11:09:57 crc kubenswrapper[4923]: E1128 11:09:57.168297 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-g2kmb" podUID="b483d037-b692-45d5-bb83-02e029649100"
Nov 28 11:09:58 crc kubenswrapper[4923]: I1128 11:09:58.167588 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 11:09:58 crc kubenswrapper[4923]: E1128 11:09:58.167774 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 11:09:58 crc kubenswrapper[4923]: I1128 11:09:58.168104 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 11:09:58 crc kubenswrapper[4923]: I1128 11:09:58.168215 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 11:09:58 crc kubenswrapper[4923]: E1128 11:09:58.168455 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 11:09:58 crc kubenswrapper[4923]: E1128 11:09:58.168582 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Has your network provider started?"} Nov 28 11:09:59 crc kubenswrapper[4923]: I1128 11:09:59.012238 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:59 crc kubenswrapper[4923]: I1128 11:09:59.012386 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:59 crc kubenswrapper[4923]: I1128 11:09:59.012419 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:59 crc kubenswrapper[4923]: I1128 11:09:59.012511 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:59 crc kubenswrapper[4923]: I1128 11:09:59.012541 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:59Z","lastTransitionTime":"2025-11-28T11:09:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:59 crc kubenswrapper[4923]: I1128 11:09:59.115957 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:59 crc kubenswrapper[4923]: I1128 11:09:59.116029 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:59 crc kubenswrapper[4923]: I1128 11:09:59.116046 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:59 crc kubenswrapper[4923]: I1128 11:09:59.116068 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:59 crc kubenswrapper[4923]: I1128 11:09:59.116083 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:59Z","lastTransitionTime":"2025-11-28T11:09:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:59 crc kubenswrapper[4923]: I1128 11:09:59.167857 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-g2kmb" Nov 28 11:09:59 crc kubenswrapper[4923]: E1128 11:09:59.168091 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-g2kmb" podUID="b483d037-b692-45d5-bb83-02e029649100" Nov 28 11:09:59 crc kubenswrapper[4923]: I1128 11:09:59.219366 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:59 crc kubenswrapper[4923]: I1128 11:09:59.219441 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:59 crc kubenswrapper[4923]: I1128 11:09:59.219465 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:59 crc kubenswrapper[4923]: I1128 11:09:59.219497 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:59 crc kubenswrapper[4923]: I1128 11:09:59.219520 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:59Z","lastTransitionTime":"2025-11-28T11:09:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:59 crc kubenswrapper[4923]: I1128 11:09:59.328270 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:59 crc kubenswrapper[4923]: I1128 11:09:59.328912 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:59 crc kubenswrapper[4923]: I1128 11:09:59.328982 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:59 crc kubenswrapper[4923]: I1128 11:09:59.329015 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:59 crc kubenswrapper[4923]: I1128 11:09:59.329037 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:59Z","lastTransitionTime":"2025-11-28T11:09:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:59 crc kubenswrapper[4923]: I1128 11:09:59.432589 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:59 crc kubenswrapper[4923]: I1128 11:09:59.432656 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:59 crc kubenswrapper[4923]: I1128 11:09:59.432675 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:59 crc kubenswrapper[4923]: I1128 11:09:59.432701 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:59 crc kubenswrapper[4923]: I1128 11:09:59.432717 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:59Z","lastTransitionTime":"2025-11-28T11:09:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:59 crc kubenswrapper[4923]: I1128 11:09:59.536098 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:59 crc kubenswrapper[4923]: I1128 11:09:59.536167 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:59 crc kubenswrapper[4923]: I1128 11:09:59.536189 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:59 crc kubenswrapper[4923]: I1128 11:09:59.536219 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:59 crc kubenswrapper[4923]: I1128 11:09:59.536240 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:59Z","lastTransitionTime":"2025-11-28T11:09:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:59 crc kubenswrapper[4923]: I1128 11:09:59.639776 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:59 crc kubenswrapper[4923]: I1128 11:09:59.639829 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:59 crc kubenswrapper[4923]: I1128 11:09:59.639845 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:59 crc kubenswrapper[4923]: I1128 11:09:59.640040 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:59 crc kubenswrapper[4923]: I1128 11:09:59.640072 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:59Z","lastTransitionTime":"2025-11-28T11:09:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:09:59 crc kubenswrapper[4923]: I1128 11:09:59.743688 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:59 crc kubenswrapper[4923]: I1128 11:09:59.743745 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:59 crc kubenswrapper[4923]: I1128 11:09:59.743766 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:59 crc kubenswrapper[4923]: I1128 11:09:59.743797 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:59 crc kubenswrapper[4923]: I1128 11:09:59.743816 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:59Z","lastTransitionTime":"2025-11-28T11:09:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:59 crc kubenswrapper[4923]: I1128 11:09:59.848842 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:59 crc kubenswrapper[4923]: I1128 11:09:59.848890 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:59 crc kubenswrapper[4923]: I1128 11:09:59.848909 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:59 crc kubenswrapper[4923]: I1128 11:09:59.848955 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:59 crc kubenswrapper[4923]: I1128 11:09:59.849452 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:59Z","lastTransitionTime":"2025-11-28T11:09:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:09:59 crc kubenswrapper[4923]: I1128 11:09:59.952587 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:09:59 crc kubenswrapper[4923]: I1128 11:09:59.952640 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:09:59 crc kubenswrapper[4923]: I1128 11:09:59.952656 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:09:59 crc kubenswrapper[4923]: I1128 11:09:59.952680 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:09:59 crc kubenswrapper[4923]: I1128 11:09:59.952698 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:09:59Z","lastTransitionTime":"2025-11-28T11:09:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 28 11:10:00 crc kubenswrapper[4923]: I1128 11:10:00.056198 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 11:10:00 crc kubenswrapper[4923]: I1128 11:10:00.056271 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 11:10:00 crc kubenswrapper[4923]: I1128 11:10:00.056293 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 11:10:00 crc kubenswrapper[4923]: I1128 11:10:00.056389 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 11:10:00 crc kubenswrapper[4923]: I1128 11:10:00.056488 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:00Z","lastTransitionTime":"2025-11-28T11:10:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 11:10:00 crc kubenswrapper[4923]: I1128 11:10:00.159536 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 11:10:00 crc kubenswrapper[4923]: I1128 11:10:00.159611 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 11:10:00 crc kubenswrapper[4923]: I1128 11:10:00.159634 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 11:10:00 crc kubenswrapper[4923]: I1128 11:10:00.159663 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 11:10:00 crc kubenswrapper[4923]: I1128 11:10:00.159687 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:00Z","lastTransitionTime":"2025-11-28T11:10:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 11:10:00 crc kubenswrapper[4923]: I1128 11:10:00.168363 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 11:10:00 crc kubenswrapper[4923]: E1128 11:10:00.168538 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 11:10:00 crc kubenswrapper[4923]: I1128 11:10:00.168564 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 11:10:00 crc kubenswrapper[4923]: I1128 11:10:00.169140 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 11:10:00 crc kubenswrapper[4923]: E1128 11:10:00.169188 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 11:10:00 crc kubenswrapper[4923]: E1128 11:10:00.169455 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 11:10:00 crc kubenswrapper[4923]: I1128 11:10:00.169847 4923 scope.go:117] "RemoveContainer" containerID="026b93efaab822fb3d6aee74b0b301389d90f99963ace4d988dc77173ba770ee"
Nov 28 11:10:00 crc kubenswrapper[4923]: I1128 11:10:00.270809 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 11:10:00 crc kubenswrapper[4923]: I1128 11:10:00.270927 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 11:10:00 crc kubenswrapper[4923]: I1128 11:10:00.271023 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 11:10:00 crc kubenswrapper[4923]: I1128 11:10:00.271091 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 11:10:00 crc kubenswrapper[4923]: I1128 11:10:00.271110 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:00Z","lastTransitionTime":"2025-11-28T11:10:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 11:10:00 crc kubenswrapper[4923]: I1128 11:10:00.376093 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 11:10:00 crc kubenswrapper[4923]: I1128 11:10:00.376545 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 11:10:00 crc kubenswrapper[4923]: I1128 11:10:00.376568 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 11:10:00 crc kubenswrapper[4923]: I1128 11:10:00.376599 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 11:10:00 crc kubenswrapper[4923]: I1128 11:10:00.376625 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:00Z","lastTransitionTime":"2025-11-28T11:10:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 11:10:00 crc kubenswrapper[4923]: I1128 11:10:00.481700 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 11:10:00 crc kubenswrapper[4923]: I1128 11:10:00.481748 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 11:10:00 crc kubenswrapper[4923]: I1128 11:10:00.481764 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 11:10:00 crc kubenswrapper[4923]: I1128 11:10:00.481786 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 11:10:00 crc kubenswrapper[4923]: I1128 11:10:00.481803 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:00Z","lastTransitionTime":"2025-11-28T11:10:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 11:10:00 crc kubenswrapper[4923]: I1128 11:10:00.584758 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 11:10:00 crc kubenswrapper[4923]: I1128 11:10:00.584818 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 11:10:00 crc kubenswrapper[4923]: I1128 11:10:00.584835 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 11:10:00 crc kubenswrapper[4923]: I1128 11:10:00.584859 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 11:10:00 crc kubenswrapper[4923]: I1128 11:10:00.584876 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:00Z","lastTransitionTime":"2025-11-28T11:10:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 11:10:00 crc kubenswrapper[4923]: I1128 11:10:00.687747 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 11:10:00 crc kubenswrapper[4923]: I1128 11:10:00.687855 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 11:10:00 crc kubenswrapper[4923]: I1128 11:10:00.687881 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 11:10:00 crc kubenswrapper[4923]: I1128 11:10:00.687915 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 11:10:00 crc kubenswrapper[4923]: I1128 11:10:00.687972 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:00Z","lastTransitionTime":"2025-11-28T11:10:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 11:10:00 crc kubenswrapper[4923]: I1128 11:10:00.790690 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 11:10:00 crc kubenswrapper[4923]: I1128 11:10:00.790750 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 11:10:00 crc kubenswrapper[4923]: I1128 11:10:00.790770 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 11:10:00 crc kubenswrapper[4923]: I1128 11:10:00.790793 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 11:10:00 crc kubenswrapper[4923]: I1128 11:10:00.790812 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:00Z","lastTransitionTime":"2025-11-28T11:10:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 11:10:00 crc kubenswrapper[4923]: I1128 11:10:00.894087 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 11:10:00 crc kubenswrapper[4923]: I1128 11:10:00.894153 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 11:10:00 crc kubenswrapper[4923]: I1128 11:10:00.894171 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 11:10:00 crc kubenswrapper[4923]: I1128 11:10:00.894195 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 11:10:00 crc kubenswrapper[4923]: I1128 11:10:00.894216 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:00Z","lastTransitionTime":"2025-11-28T11:10:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 11:10:00 crc kubenswrapper[4923]: I1128 11:10:00.986109 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 11:10:00 crc kubenswrapper[4923]: I1128 11:10:00.986227 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 11:10:00 crc kubenswrapper[4923]: I1128 11:10:00.986252 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 11:10:00 crc kubenswrapper[4923]: I1128 11:10:00.986282 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 11:10:00 crc kubenswrapper[4923]: I1128 11:10:00.986303 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:00Z","lastTransitionTime":"2025-11-28T11:10:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 11:10:01 crc kubenswrapper[4923]: E1128 11:10:01.008513 4923 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:10:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:10:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:10:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:10:00Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:10:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:10:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:10:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:10:00Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f69ffe27-00d5-45aa-bb63-00075a21e0c7\\\",\\\"systemUUID\\\":\\\"bb6b4e53-d23a-4517-9d50-b05bdc3da8e4\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:10:01Z is after 2025-08-24T17:21:41Z"
Nov 28 11:10:01 crc kubenswrapper[4923]: I1128 11:10:01.013619 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 11:10:01 crc kubenswrapper[4923]: I1128 11:10:01.013668 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
event="NodeHasNoDiskPressure" Nov 28 11:10:01 crc kubenswrapper[4923]: I1128 11:10:01.013685 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:01 crc kubenswrapper[4923]: I1128 11:10:01.013711 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:01 crc kubenswrapper[4923]: I1128 11:10:01.013728 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:01Z","lastTransitionTime":"2025-11-28T11:10:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:10:01 crc kubenswrapper[4923]: E1128 11:10:01.034238 4923 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:10:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:10:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:10:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:10:01Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:10:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:10:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:10:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:10:01Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f69ffe27-00d5-45aa-bb63-00075a21e0c7\\\",\\\"systemUUID\\\":\\\"bb6b4e53-d23a-4517-9d50-b05bdc3da8e4\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:10:01Z is after 2025-08-24T17:21:41Z" Nov 28 11:10:01 crc kubenswrapper[4923]: I1128 11:10:01.038689 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:01 crc kubenswrapper[4923]: I1128 11:10:01.038735 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 11:10:01 crc kubenswrapper[4923]: I1128 11:10:01.038752 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:01 crc kubenswrapper[4923]: I1128 11:10:01.038774 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:01 crc kubenswrapper[4923]: I1128 11:10:01.038791 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:01Z","lastTransitionTime":"2025-11-28T11:10:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:10:01 crc kubenswrapper[4923]: E1128 11:10:01.057660 4923 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:10:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:10:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:10:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:10:01Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:10:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:10:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:10:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:10:01Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f69ffe27-00d5-45aa-bb63-00075a21e0c7\\\",\\\"systemUUID\\\":\\\"bb6b4e53-d23a-4517-9d50-b05bdc3da8e4\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:10:01Z is after 2025-08-24T17:21:41Z" Nov 28 11:10:01 crc kubenswrapper[4923]: I1128 11:10:01.070227 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:01 crc kubenswrapper[4923]: I1128 11:10:01.070290 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 11:10:01 crc kubenswrapper[4923]: I1128 11:10:01.070310 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:01 crc kubenswrapper[4923]: I1128 11:10:01.070350 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:01 crc kubenswrapper[4923]: I1128 11:10:01.070369 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:01Z","lastTransitionTime":"2025-11-28T11:10:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:10:01 crc kubenswrapper[4923]: E1128 11:10:01.091347 4923 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:10:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:10:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:10:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:10:01Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:10:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:10:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:10:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:10:01Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f69ffe27-00d5-45aa-bb63-00075a21e0c7\\\",\\\"systemUUID\\\":\\\"bb6b4e53-d23a-4517-9d50-b05bdc3da8e4\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:10:01Z is after 2025-08-24T17:21:41Z" Nov 28 11:10:01 crc kubenswrapper[4923]: I1128 11:10:01.096507 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:01 crc kubenswrapper[4923]: I1128 11:10:01.096562 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 11:10:01 crc kubenswrapper[4923]: I1128 11:10:01.096578 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:01 crc kubenswrapper[4923]: I1128 11:10:01.096599 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:01 crc kubenswrapper[4923]: I1128 11:10:01.096615 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:01Z","lastTransitionTime":"2025-11-28T11:10:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:10:01 crc kubenswrapper[4923]: E1128 11:10:01.115201 4923 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:10:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:10:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:10:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:10:01Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:10:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:10:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T11:10:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T11:10:01Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f69ffe27-00d5-45aa-bb63-00075a21e0c7\\\",\\\"systemUUID\\\":\\\"bb6b4e53-d23a-4517-9d50-b05bdc3da8e4\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:10:01Z is after 2025-08-24T17:21:41Z" Nov 28 11:10:01 crc kubenswrapper[4923]: E1128 11:10:01.115418 4923 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 28 11:10:01 crc kubenswrapper[4923]: I1128 11:10:01.117315 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 28 11:10:01 crc kubenswrapper[4923]: I1128 11:10:01.117786 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:01 crc kubenswrapper[4923]: I1128 11:10:01.117816 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:01 crc kubenswrapper[4923]: I1128 11:10:01.117842 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:01 crc kubenswrapper[4923]: I1128 11:10:01.117864 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:01Z","lastTransitionTime":"2025-11-28T11:10:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:10:01 crc kubenswrapper[4923]: I1128 11:10:01.168014 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-g2kmb" Nov 28 11:10:01 crc kubenswrapper[4923]: E1128 11:10:01.168189 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-g2kmb" podUID="b483d037-b692-45d5-bb83-02e029649100" Nov 28 11:10:01 crc kubenswrapper[4923]: I1128 11:10:01.195841 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c83fada-ddb5-4acd-99c4-74d9f42e6250\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eece6b2154126c64202c6cb5a8b2953275ed2dc75e76fef6aaf2c4b82a1979f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28093276aebb4751d979649c4ced86f500308d0d4dde397771c0e1e968250ec8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28ae91e6197ea506c337abdbce14a048856e6bda9b35c5de922904c26bc96a54\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb7df64556e877b9dd56be5e97103abc8aa8b28a43b4a5389d0f6e2489057cf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc06f87c8ea0744810e2b9cb7ff8bb529fc1b2133ab79d12eb8e6129accd3e18\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"-12-28 11:08:43 +0000 UTC (now=2025-11-28 11:08:59.275700323 +0000 UTC))\\\\\\\"\\\\nI1128 11:08:59.275749 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1128 11:08:59.275786 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1128 11:08:59.275797 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI1128 11:08:59.275809 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1128 11:08:59.275835 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1128 11:08:59.275852 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764328134\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764328133\\\\\\\\\\\\\\\" (2025-11-28 10:08:53 +0000 UTC to 2026-11-28 10:08:53 +0000 UTC (now=2025-11-28 11:08:59.275832266 +0000 UTC))\\\\\\\"\\\\nI1128 11:08:59.275869 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1128 11:08:59.275889 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1128 11:08:59.275902 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1128 11:08:59.275909 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1128 11:08:59.275921 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1128 11:08:59.275909 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2723273528/tls.crt::/tmp/serving-cert-2723273528/tls.key\\\\\\\"\\\\nF1128 11:08:59.278169 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:43Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c6f085f1fd5a1ed6abe0727d6a94c95fb1b97a9f00a0dc157f62f68698c25ba9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:10:01Z is after 2025-08-24T17:21:41Z" Nov 28 11:10:01 crc kubenswrapper[4923]: I1128 11:10:01.218339 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c1e1dcf5efd54a3e3546460813ddc68dae027e669a19eeef6af7246b385ed21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:10:01Z is after 2025-08-24T17:21:41Z" Nov 28 11:10:01 crc kubenswrapper[4923]: I1128 11:10:01.222002 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:01 crc kubenswrapper[4923]: I1128 11:10:01.222043 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:01 crc kubenswrapper[4923]: I1128 11:10:01.222058 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:01 crc kubenswrapper[4923]: I1128 11:10:01.222080 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:01 crc kubenswrapper[4923]: I1128 11:10:01.222096 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:01Z","lastTransitionTime":"2025-11-28T11:10:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:10:01 crc kubenswrapper[4923]: I1128 11:10:01.239238 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:10:01Z is after 2025-08-24T17:21:41Z" Nov 28 11:10:01 crc kubenswrapper[4923]: I1128 11:10:01.264556 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:10:01Z is after 2025-08-24T17:21:41Z" Nov 28 11:10:01 crc kubenswrapper[4923]: I1128 11:10:01.283200 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:10:01Z is after 2025-08-24T17:21:41Z" Nov 28 11:10:01 crc kubenswrapper[4923]: I1128 11:10:01.301403 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"092566f7-fc7d-4897-a1f2-4ecedcd3058e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5e3ad6f76cbc3a3e771dc55c8711f153c18c1c96798a89e0f20b1ff06041129c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nthb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e0494fbf37786a6c8b1524ab2642c29343c3cfef308a6f0988d59f375d732a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\
\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nthb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-bwdth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:10:01Z is after 2025-08-24T17:21:41Z" Nov 28 11:10:01 crc kubenswrapper[4923]: I1128 11:10:01.325242 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:01 crc kubenswrapper[4923]: I1128 11:10:01.325295 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:01 crc kubenswrapper[4923]: I1128 11:10:01.325312 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:01 crc kubenswrapper[4923]: I1128 11:10:01.325334 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:01 crc kubenswrapper[4923]: I1128 11:10:01.325352 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:01Z","lastTransitionTime":"2025-11-28T11:10:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:10:01 crc kubenswrapper[4923]: I1128 11:10:01.332103 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ee3c047cb59b98c8394618e6194fc477b983a7039581951378c69698b307ee7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3c01dc5b138b3d245898dd4a01c5e81350afe6fabfe9e0333589cd9439d4017\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://88bb4ac52c4706ca3d80080efb31eff071b89651d1a474b4c0c11ed5559ee7a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7b206747c810fe48a3d4269cdf80dce693f2d075510aabb42ef2c6dbbea97e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7489bfb225a27d96b70124820fb1924580c08b3355ef948335f881d7646a8a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bc7c6e0b076f04ba7810c82578147a9a3af59d3393e8effb111c299583aa6de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://026b93efaab822fb3d6aee74b0b301389d90f99963ace4d988dc77173ba770ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://026b93efaab822fb3d6aee74b0b301389d90f99963ace4d988dc77173ba770ee\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T11:09:31Z\\\",\\\"message\\\":\\\"c771-4738-8709-09636387cb00}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {7e8bb06a-06a5-45bc-a752-26a17d322811}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:61897e97-c771-4738-8709-09636387cb00}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {c02bd945-d57b-49ff-9cd3-202ed3574b26}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.4 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {43933d5e-3c3b-4ff8-8926-04ac25de450e}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:nat Mutator:insert Value:{GoSet:[{GoUUID:43933d5e-3c3b-4ff8-8926-04ac25de450e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {e3c4661a-36a6-47f0-a6c0-a4ee741f2224}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1128 11:09:31.089339 6468 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1128 11:09:31.089404 6468 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to 
create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:30Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-68dth_openshift-ovn-kubernetes(08e03349-56fc-4b2d-93d3-cf2405a4b7ad)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c2e3f2c83ec1b586a9478fb8d23caccab36a0fe08a3f0907a7b0cb2e67af65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recurs
iveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-68dth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:10:01Z is after 2025-08-24T17:21:41Z" Nov 28 11:10:01 crc kubenswrapper[4923]: I1128 11:10:01.351687 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-8klhg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b1f111d9-e2b2-44b9-9592-bc5d4fef01f0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69bb796e49d5ca00e472f027f1443316695a4e243faff1eec26bc13d67bbc60a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vq594\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f90a5608dca4e71887975960683dda08b1b5e01f598af251663a968bb7fe56c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vq594\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:13Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-8klhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:10:01Z is after 2025-08-24T17:21:41Z" Nov 28 
11:10:01 crc kubenswrapper[4923]: I1128 11:10:01.367374 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-766k2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69fcf39a-3416-4733-a55a-043d5286f8ac\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14683c7234bd497157ffe1097cd1eee097e5dd0a9e53a3e39813bc75890961b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dnr6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-766k2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:10:01Z is after 2025-08-24T17:21:41Z" Nov 28 11:10:01 crc kubenswrapper[4923]: I1128 11:10:01.388894 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdcd87eab93f0216a48bbd6038ca2bc510b7b36f895bf66de15084be62a9a0e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa3a1d3e4297edce49cfd44925fbd1cb0d51752581df9a406042cc1da6f87121\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:10:01Z is after 2025-08-24T17:21:41Z" Nov 28 11:10:01 crc kubenswrapper[4923]: I1128 11:10:01.407697 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0d288688a32f135820030d0816b0e9567100a4732e99c41c8b7f05374c8251f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:10:01Z is after 2025-08-24T17:21:41Z" Nov 28 11:10:01 crc kubenswrapper[4923]: I1128 11:10:01.428048 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:01 crc kubenswrapper[4923]: I1128 11:10:01.428115 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:01 crc kubenswrapper[4923]: I1128 11:10:01.428132 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:01 crc kubenswrapper[4923]: I1128 11:10:01.428154 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:01 crc kubenswrapper[4923]: I1128 11:10:01.428170 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:01Z","lastTransitionTime":"2025-11-28T11:10:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:10:01 crc kubenswrapper[4923]: I1128 11:10:01.430898 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-9gjj9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b5d7899933378350cf0b863d44216aa3d87b7343f144dcab3470ee44370de0a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27143610133e2bc3e2aa453a394a9f65fcdeb97a45221a239dd490029e5a3184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://27143610133e2bc3e2aa453a394a9f65fcdeb97a45221a239dd490029e5a3184\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79f89c182f50622044f3978965cb214c601f6de4cddc96eaa118f532b2864276\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://79f89c182f50622044f3978965cb214c601f6de4cddc96eaa118f532b2864276\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7996a8b1d06ca35a2ee6c89edc2eaa7e45a6084ab54ff0caaa091c763d3cd47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b7996a8b1d06ca35a2ee6c89edc2eaa7e45a6084ab54ff0caaa091c763d3cd47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62d8385e1aa47815f9084d28d70dae899c80019ce59f5725455c594a31c97f22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62d8385e1aa47815f9084d28d70dae899c80019ce59f5725455c594a31c97f22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f6b2e1bc9f8f538d0973d9b1726d2c105d61fcd559df3ab8a2ec77b2d8f44a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f6b2e1bc9f8f538d0973d9b1726d2c105d61fcd559df3ab8a2ec77b2d8f44a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a368daf98912d176b66d5aba37e5e91937fbee8c7bd7ce6658993668c8e1525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9a368daf98912d176b66d5aba37e5e91937fbee8c7bd7ce6658993668c8e1525\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-9gjj9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:10:01Z is after 2025-08-24T17:21:41Z" Nov 28 11:10:01 crc kubenswrapper[4923]: I1128 11:10:01.445594 4923 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-image-registry/node-ca-9qvkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf32d1c9-4639-48a9-b972-c9ad6daec543\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee259c68571ed9e58d29ab09558dea3cdcc89ebfb898d6f27e896cb0d80665bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnwc6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:03Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9qvkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:10:01Z is after 2025-08-24T17:21:41Z" Nov 28 11:10:01 crc kubenswrapper[4923]: I1128 11:10:01.459198 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-g2kmb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b483d037-b692-45d5-bb83-02e029649100\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmpxf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmpxf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:14Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-g2kmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:10:01Z is after 2025-08-24T17:21:41Z" Nov 28 11:10:01 crc kubenswrapper[4923]: I1128 11:10:01.474337 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"faf07f1a-1aa1-4e4a-b93d-739f0a9f1012\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f7b3757e1d1a5295909db644a475e35e9f9826cd7382a5a3eba86b4a76ac04d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f83e92b35264fccdd516d857e5a574a7156f7615b643691b6f8694daa38089b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8841f44f1d4af0e73960ce1c7ac5a4da352f85f6b3637315faa716d853be3277\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc960423fd7ee0a6231020982f5b932a6a2d7d0515d6f6df503d6c5d51b82096\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:10:01Z is after 2025-08-24T17:21:41Z" Nov 28 11:10:01 crc kubenswrapper[4923]: I1128 11:10:01.490005 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3af1089a-5262-4fa0-85fb-9f992ee6274d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://403d762c4ba4c4f3309ef1b447be25f7882da8a2d03b9376711063165438294f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3513d0400c621295e074b54a00fe7f284c38bebd8e7f11315db91fef9a2a4693\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://81443f6c4751860dce1d5ecf0f867a1c9641a989cbfd171e71de418f738108c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81a3db2980eccec7427b48074b3314c31b8471001076f7a7d9cfae435564097e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://81a3db2980eccec7427b48074b3314c31b8471001076f7a7d9cfae435564097e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:10:01Z is after 2025-08-24T17:21:41Z" Nov 28 11:10:01 crc kubenswrapper[4923]: I1128 11:10:01.509584 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-h5s2m" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"84374038-67ce-4dc0-a2c2-6eed9650c604\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://53821c93696c6770adcfbe02308f05bdb9635578bd1dfa8d3201ecf94fa8b37c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://addcc8dd720a66b5089f7fa541a454de2be862cc524d1f8e4c948059ef70e20f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T11:09:46Z\\\",\\\"message\\\":\\\"2025-11-28T11:09:01+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_586be321-5a1b-4320-8bdd-14f453eec838\\\\n2025-11-28T11:09:01+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_586be321-5a1b-4320-8bdd-14f453eec838 to /host/opt/cni/bin/\\\\n2025-11-28T11:09:01Z [verbose] multus-daemon started\\\\n2025-11-28T11:09:01Z [verbose] Readiness Indicator file check\\\\n2025-11-28T11:09:46Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8z7ts\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-h5s2m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:10:01Z is after 2025-08-24T17:21:41Z" Nov 28 11:10:01 crc kubenswrapper[4923]: I1128 11:10:01.531054 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:01 crc kubenswrapper[4923]: I1128 11:10:01.531105 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:01 crc kubenswrapper[4923]: I1128 11:10:01.531121 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:01 crc kubenswrapper[4923]: I1128 11:10:01.531146 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:01 crc kubenswrapper[4923]: I1128 11:10:01.531164 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:01Z","lastTransitionTime":"2025-11-28T11:10:01Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:10:01 crc kubenswrapper[4923]: I1128 11:10:01.633578 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:01 crc kubenswrapper[4923]: I1128 11:10:01.633619 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:01 crc kubenswrapper[4923]: I1128 11:10:01.633636 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:01 crc kubenswrapper[4923]: I1128 11:10:01.633660 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:01 crc kubenswrapper[4923]: I1128 11:10:01.633678 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:01Z","lastTransitionTime":"2025-11-28T11:10:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:10:01 crc kubenswrapper[4923]: I1128 11:10:01.737850 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:01 crc kubenswrapper[4923]: I1128 11:10:01.737915 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:01 crc kubenswrapper[4923]: I1128 11:10:01.737958 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:01 crc kubenswrapper[4923]: I1128 11:10:01.737985 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:01 crc kubenswrapper[4923]: I1128 11:10:01.738003 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:01Z","lastTransitionTime":"2025-11-28T11:10:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:10:01 crc kubenswrapper[4923]: I1128 11:10:01.841661 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:01 crc kubenswrapper[4923]: I1128 11:10:01.841723 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:01 crc kubenswrapper[4923]: I1128 11:10:01.841741 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:01 crc kubenswrapper[4923]: I1128 11:10:01.841763 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:01 crc kubenswrapper[4923]: I1128 11:10:01.841781 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:01Z","lastTransitionTime":"2025-11-28T11:10:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:10:01 crc kubenswrapper[4923]: I1128 11:10:01.943894 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:01 crc kubenswrapper[4923]: I1128 11:10:01.943923 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:01 crc kubenswrapper[4923]: I1128 11:10:01.943958 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:01 crc kubenswrapper[4923]: I1128 11:10:01.943973 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:01 crc kubenswrapper[4923]: I1128 11:10:01.943985 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:01Z","lastTransitionTime":"2025-11-28T11:10:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:10:02 crc kubenswrapper[4923]: I1128 11:10:02.046515 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:02 crc kubenswrapper[4923]: I1128 11:10:02.046581 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:02 crc kubenswrapper[4923]: I1128 11:10:02.046594 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:02 crc kubenswrapper[4923]: I1128 11:10:02.046635 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:02 crc kubenswrapper[4923]: I1128 11:10:02.046648 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:02Z","lastTransitionTime":"2025-11-28T11:10:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:10:02 crc kubenswrapper[4923]: I1128 11:10:02.149700 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:02 crc kubenswrapper[4923]: I1128 11:10:02.149762 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:02 crc kubenswrapper[4923]: I1128 11:10:02.149776 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:02 crc kubenswrapper[4923]: I1128 11:10:02.149797 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:02 crc kubenswrapper[4923]: I1128 11:10:02.149807 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:02Z","lastTransitionTime":"2025-11-28T11:10:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:10:02 crc kubenswrapper[4923]: I1128 11:10:02.168219 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 11:10:02 crc kubenswrapper[4923]: E1128 11:10:02.168338 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 11:10:02 crc kubenswrapper[4923]: I1128 11:10:02.168388 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 11:10:02 crc kubenswrapper[4923]: E1128 11:10:02.168530 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 11:10:02 crc kubenswrapper[4923]: I1128 11:10:02.168555 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 11:10:02 crc kubenswrapper[4923]: E1128 11:10:02.168620 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 11:10:02 crc kubenswrapper[4923]: I1128 11:10:02.195743 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-68dth_08e03349-56fc-4b2d-93d3-cf2405a4b7ad/ovnkube-controller/2.log" Nov 28 11:10:02 crc kubenswrapper[4923]: I1128 11:10:02.198793 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" event={"ID":"08e03349-56fc-4b2d-93d3-cf2405a4b7ad","Type":"ContainerStarted","Data":"38c546a3fbd5195bd0602ef14f92ec2bfd832e3f46cce2b709483a90a97e1611"} Nov 28 11:10:02 crc kubenswrapper[4923]: I1128 11:10:02.199332 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" Nov 28 11:10:02 crc kubenswrapper[4923]: I1128 11:10:02.220360 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-766k2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69fcf39a-3416-4733-a55a-043d5286f8ac\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14683c7234bd497157ffe1097cd1eee097e5dd0a9e53a3e39813bc75890961b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dnr6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-766k2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:10:02Z is after 2025-08-24T17:21:41Z" Nov 28 11:10:02 crc kubenswrapper[4923]: 
I1128 11:10:02.233839 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0d288688a32f135820030d0816b0e9567100a4732e99c41c8b7f05374c8251f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:10:02Z is after 2025-08-24T17:21:41Z" Nov 28 11:10:02 crc kubenswrapper[4923]: I1128 11:10:02.252366 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:02 crc kubenswrapper[4923]: I1128 11:10:02.252396 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:02 crc kubenswrapper[4923]: I1128 11:10:02.252423 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:02 crc kubenswrapper[4923]: I1128 11:10:02.252438 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:02 crc kubenswrapper[4923]: I1128 11:10:02.252448 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:02Z","lastTransitionTime":"2025-11-28T11:10:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:10:02 crc kubenswrapper[4923]: I1128 11:10:02.260059 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-9gjj9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b5d7899933378350cf0b863d44216aa3d87b7343f144dcab3470ee44370de0a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27143610133e2bc3e2aa453a394a9f65fcdeb97a45221a239dd490029e5a3184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://27143610133e2bc3e2aa453a394a9f65fcdeb97a45221a239dd490029e5a3184\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79f89c182f50622044f3978965cb214c601f6de4cddc96eaa118f532b2864276\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://79f89c182f50622044f3978965cb214c601f6de4cddc96eaa118f532b2864276\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7996a8b1d06ca35a2ee6c89edc2eaa7e45a6084ab54ff0caaa091c763d3cd47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b7996a8b1d06ca35a2ee6c89edc2eaa7e45a6084ab54ff0caaa091c763d3cd47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62d8385e1aa47815f9084d28d70dae899c80019ce59f5725455c594a31c97f22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62d8385e1aa47815f9084d28d70dae899c80019ce59f5725455c594a31c97f22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f6b2e1bc9f8f538d0973d9b1726d2c105d61fcd559df3ab8a2ec77b2d8f44a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f6b2e1bc9f8f538d0973d9b1726d2c105d61fcd559df3ab8a2ec77b2d8f44a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a368daf98912d176b66d5aba37e5e91937fbee8c7bd7ce6658993668c8e1525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9a368daf98912d176b66d5aba37e5e91937fbee8c7bd7ce6658993668c8e1525\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-9gjj9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:10:02Z is after 2025-08-24T17:21:41Z" Nov 28 11:10:02 crc kubenswrapper[4923]: I1128 11:10:02.269835 4923 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-image-registry/node-ca-9qvkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf32d1c9-4639-48a9-b972-c9ad6daec543\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee259c68571ed9e58d29ab09558dea3cdcc89ebfb898d6f27e896cb0d80665bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnwc6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:03Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9qvkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:10:02Z is after 2025-08-24T17:21:41Z" Nov 28 11:10:02 crc kubenswrapper[4923]: I1128 11:10:02.278694 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-g2kmb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b483d037-b692-45d5-bb83-02e029649100\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmpxf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmpxf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:14Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-g2kmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:10:02Z is after 2025-08-24T17:21:41Z" Nov 28 11:10:02 crc kubenswrapper[4923]: I1128 11:10:02.289759 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdcd87eab93f0216a48bbd6038ca2bc510b7b36f895bf66de15084be62a9a0e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa3a1d3e4297edce49cfd44925fbd1cb0d51752581df9a406042cc1da6f87121\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:10:02Z is after 2025-08-24T17:21:41Z" Nov 28 11:10:02 crc kubenswrapper[4923]: I1128 11:10:02.300627 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"faf07f1a-1aa1-4e4a-b93d-739f0a9f1012\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f7b3757e1d1a5295909db644a475e35e9f9826cd7382a5a3eba86b4a76ac04d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f83e92b35264fccdd516d857e5a574a7156f7615b643691b6f8694daa38089b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8841f44f1d4af0e73960ce1c7ac5a4da352f85f6b3637315faa716d853be3277\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc960423fd7ee0a6231020982f5b932a6a2d7d0515d6f6df503d6c5d51b82096\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:10:02Z is after 2025-08-24T17:21:41Z" Nov 28 11:10:02 crc kubenswrapper[4923]: I1128 11:10:02.310650 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3af1089a-5262-4fa0-85fb-9f992ee6274d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://403d762c4ba4c4f3309ef1b447be25f7882da8a2d03b9376711063165438294f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3513d0400c621295e074b54a00fe7f284c38bebd8e7f11315db91fef9a2a4693\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://81443f6c4751860dce1d5ecf0f867a1c9641a989cbfd171e71de418f738108c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81a3db2980eccec7427b48074b3314c31b8471001076f7a7d9cfae435564097e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://81a3db2980eccec7427b48074b3314c31b8471001076f7a7d9cfae435564097e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:10:02Z is after 2025-08-24T17:21:41Z" Nov 28 11:10:02 crc kubenswrapper[4923]: I1128 11:10:02.322075 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-h5s2m" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"84374038-67ce-4dc0-a2c2-6eed9650c604\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://53821c93696c6770adcfbe02308f05bdb9635578bd1dfa8d3201ecf94fa8b37c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://addcc8dd720a66b5089f7fa541a454de2be862cc524d1f8e4c948059ef70e20f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T11:09:46Z\\\",\\\"message\\\":\\\"2025-11-28T11:09:01+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_586be321-5a1b-4320-8bdd-14f453eec838\\\\n2025-11-28T11:09:01+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_586be321-5a1b-4320-8bdd-14f453eec838 to /host/opt/cni/bin/\\\\n2025-11-28T11:09:01Z [verbose] multus-daemon started\\\\n2025-11-28T11:09:01Z [verbose] Readiness Indicator file check\\\\n2025-11-28T11:09:46Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8z7ts\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-h5s2m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:10:02Z is after 2025-08-24T17:21:41Z" Nov 28 11:10:02 crc kubenswrapper[4923]: I1128 11:10:02.332287 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:10:02Z is after 2025-08-24T17:21:41Z" Nov 28 11:10:02 crc kubenswrapper[4923]: I1128 11:10:02.342323 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:10:02Z is after 2025-08-24T17:21:41Z" Nov 28 11:10:02 crc kubenswrapper[4923]: I1128 11:10:02.353063 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"092566f7-fc7d-4897-a1f2-4ecedcd3058e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5e3ad6f76cbc3a3e771dc55c8711f153c18c1c96798a89e0f20b1ff06041129c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nthb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e0494fbf37786a6c8b1524ab2642c29343c3cfef308a6f0988d59f375d732a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\
\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nthb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-bwdth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:10:02Z is after 2025-08-24T17:21:41Z" Nov 28 11:10:02 crc kubenswrapper[4923]: I1128 11:10:02.354446 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:02 crc kubenswrapper[4923]: I1128 11:10:02.354476 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:02 crc kubenswrapper[4923]: I1128 11:10:02.354486 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:02 crc kubenswrapper[4923]: I1128 11:10:02.354505 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:02 crc kubenswrapper[4923]: I1128 11:10:02.354516 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:02Z","lastTransitionTime":"2025-11-28T11:10:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:10:02 crc kubenswrapper[4923]: I1128 11:10:02.377616 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ee3c047cb59b98c8394618e6194fc477b983a7039581951378c69698b307ee7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3c01dc5b138b3d245898dd4a01c5e81350afe6fabfe9e0333589cd9439d4017\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://88bb4ac52c4706ca3d80080efb31eff071b89651d1a474b4c0c11ed5559ee7a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7b206747c810fe48a3d4269cdf80dce693f2d075510aabb42ef2c6dbbea97e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7489bfb225a27d96b70124820fb1924580c08b3355ef948335f881d7646a8a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bc7c6e0b076f04ba7810c82578147a9a3af59d3393e8effb111c299583aa6de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38c546a3fbd5195bd0602ef14f92ec2bfd832e3f46cce2b709483a90a97e1611\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://026b93efaab822fb3d6aee74b0b301389d90f99963ace4d988dc77173ba770ee\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T11:09:31Z\\\",\\\"message\\\":\\\"c771-4738-8709-09636387cb00}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {7e8bb06a-06a5-45bc-a752-26a17d322811}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:61897e97-c771-4738-8709-09636387cb00}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {c02bd945-d57b-49ff-9cd3-202ed3574b26}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.4 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {43933d5e-3c3b-4ff8-8926-04ac25de450e}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:nat Mutator:insert Value:{GoSet:[{GoUUID:43933d5e-3c3b-4ff8-8926-04ac25de450e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {e3c4661a-36a6-47f0-a6c0-a4ee741f2224}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1128 11:09:31.089339 6468 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1128 11:09:31.089404 6468 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to 
create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:30Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:10:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c2e3f2c83ec1b586a9478fb8d23caccab36a0fe08a3f0907a7b0cb2e67af65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\
"containerID\\\":\\\"cri-o://18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-68dth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:10:02Z is after 2025-08-24T17:21:41Z" Nov 28 11:10:02 crc kubenswrapper[4923]: I1128 11:10:02.389703 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-8klhg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b1f111d9-e2b2-44b9-9592-bc5d4fef01f0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69bb796e49d5ca00e472f027f1443316695a4e243faff1eec26bc13d67bbc60a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vq594\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f90a5608dca4e71887975960683dda08b1b5e01f598af251663a968bb7fe56c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vq594\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:13Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-8klhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:10:02Z is after 2025-08-24T17:21:41Z" Nov 28 
11:10:02 crc kubenswrapper[4923]: I1128 11:10:02.403281 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c83fada-ddb5-4acd-99c4-74d9f42e6250\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eece6b2154126c64202c6cb5a8b2953275ed2dc75e76fef6aaf2c4b82a1979f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28093276aebb4751d979649c4ced86f500308d0d4dde397771c0e1e968250ec8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28ae91e6197ea506c337abdbce14a048856e6bda9b35c5de922904c26bc96a54\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\
\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb7df64556e877b9dd56be5e97103abc8aa8b28a43b4a5389d0f6e2489057cf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc06f87c8ea0744810e2b9cb7ff8bb529fc1b2133ab79d12eb8e6129accd3e18\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"-12-28 11:08:43 +0000 UTC (now=2025-11-28 11:08:59.275700323 +0000 UTC))\\\\\\\"\\\\nI1128 11:08:59.275749 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1128 11:08:59.275786 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1128 11:08:59.275797 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI1128 11:08:59.275809 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1128 11:08:59.275835 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1128 11:08:59.275852 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764328134\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764328133\\\\\\\\\\\\\\\" (2025-11-28 10:08:53 +0000 UTC to 2026-11-28 10:08:53 +0000 UTC (now=2025-11-28 11:08:59.275832266 +0000 UTC))\\\\\\\"\\\\nI1128 11:08:59.275869 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1128 11:08:59.275889 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1128 11:08:59.275902 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1128 11:08:59.275909 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1128 11:08:59.275921 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1128 11:08:59.275909 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2723273528/tls.crt::/tmp/serving-cert-2723273528/tls.key\\\\\\\"\\\\nF1128 11:08:59.278169 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:43Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c6f085f1fd5a1ed6abe0727d6a94c95fb1b97a9f00a0dc157f62f68698c25ba9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:10:02Z is after 2025-08-24T17:21:41Z" Nov 28 11:10:02 crc kubenswrapper[4923]: I1128 11:10:02.415577 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c1e1dcf5efd54a3e3546460813ddc68dae027e669a19eeef6af7246b385ed21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:10:02Z is after 2025-08-24T17:21:41Z" Nov 28 11:10:02 crc kubenswrapper[4923]: I1128 11:10:02.427086 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:10:02Z is after 2025-08-24T17:21:41Z" Nov 28 11:10:02 crc kubenswrapper[4923]: I1128 11:10:02.457221 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:02 crc kubenswrapper[4923]: I1128 11:10:02.457261 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:02 crc kubenswrapper[4923]: I1128 11:10:02.457272 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:02 crc kubenswrapper[4923]: I1128 11:10:02.457287 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:02 crc kubenswrapper[4923]: I1128 11:10:02.457299 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:02Z","lastTransitionTime":"2025-11-28T11:10:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:10:02 crc kubenswrapper[4923]: I1128 11:10:02.559499 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:02 crc kubenswrapper[4923]: I1128 11:10:02.559536 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:02 crc kubenswrapper[4923]: I1128 11:10:02.559545 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:02 crc kubenswrapper[4923]: I1128 11:10:02.559559 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:02 crc kubenswrapper[4923]: I1128 11:10:02.559570 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:02Z","lastTransitionTime":"2025-11-28T11:10:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:10:02 crc kubenswrapper[4923]: I1128 11:10:02.663072 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:02 crc kubenswrapper[4923]: I1128 11:10:02.663129 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:02 crc kubenswrapper[4923]: I1128 11:10:02.663147 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:02 crc kubenswrapper[4923]: I1128 11:10:02.663171 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:02 crc kubenswrapper[4923]: I1128 11:10:02.663188 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:02Z","lastTransitionTime":"2025-11-28T11:10:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:10:02 crc kubenswrapper[4923]: I1128 11:10:02.765834 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:02 crc kubenswrapper[4923]: I1128 11:10:02.765895 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:02 crc kubenswrapper[4923]: I1128 11:10:02.765912 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:02 crc kubenswrapper[4923]: I1128 11:10:02.765985 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:02 crc kubenswrapper[4923]: I1128 11:10:02.766037 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:02Z","lastTransitionTime":"2025-11-28T11:10:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:10:02 crc kubenswrapper[4923]: I1128 11:10:02.869333 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:02 crc kubenswrapper[4923]: I1128 11:10:02.869392 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:02 crc kubenswrapper[4923]: I1128 11:10:02.869413 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:02 crc kubenswrapper[4923]: I1128 11:10:02.869438 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:02 crc kubenswrapper[4923]: I1128 11:10:02.869456 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:02Z","lastTransitionTime":"2025-11-28T11:10:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:10:02 crc kubenswrapper[4923]: I1128 11:10:02.972865 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:02 crc kubenswrapper[4923]: I1128 11:10:02.972922 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:02 crc kubenswrapper[4923]: I1128 11:10:02.972976 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:02 crc kubenswrapper[4923]: I1128 11:10:02.973001 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:02 crc kubenswrapper[4923]: I1128 11:10:02.973017 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:02Z","lastTransitionTime":"2025-11-28T11:10:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:10:02 crc kubenswrapper[4923]: I1128 11:10:02.973322 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 11:10:02 crc kubenswrapper[4923]: I1128 11:10:02.973460 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 11:10:02 crc kubenswrapper[4923]: I1128 11:10:02.973543 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 11:10:02 crc kubenswrapper[4923]: E1128 11:10:02.973587 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 11:11:06.97355202 +0000 UTC m=+146.102236260 (durationBeforeRetry 1m4s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 11:10:02 crc kubenswrapper[4923]: E1128 11:10:02.973650 4923 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 11:10:02 crc kubenswrapper[4923]: E1128 11:10:02.973688 4923 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 11:10:02 crc kubenswrapper[4923]: E1128 11:10:02.973733 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 11:11:06.973710885 +0000 UTC m=+146.102395135 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 11:10:02 crc kubenswrapper[4923]: E1128 11:10:02.973794 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 11:11:06.973768206 +0000 UTC m=+146.102452456 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 28 11:10:03 crc kubenswrapper[4923]: I1128 11:10:03.074466 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 11:10:03 crc kubenswrapper[4923]: E1128 11:10:03.074683 4923 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 28 11:10:03 crc kubenswrapper[4923]: E1128 11:10:03.074721 4923 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 28 11:10:03 crc kubenswrapper[4923]: E1128 11:10:03.074742 4923 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 28 11:10:03 crc kubenswrapper[4923]: E1128 11:10:03.074859 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-28 11:11:07.074800793 +0000 UTC m=+146.203485043 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 28 11:10:03 crc kubenswrapper[4923]: I1128 11:10:03.076211 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 11:10:03 crc kubenswrapper[4923]: I1128 11:10:03.076252 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 11:10:03 crc kubenswrapper[4923]: I1128 11:10:03.076268 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 11:10:03 crc kubenswrapper[4923]: I1128 11:10:03.076292 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 11:10:03 crc kubenswrapper[4923]: I1128 11:10:03.076308 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:03Z","lastTransitionTime":"2025-11-28T11:10:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 11:10:03 crc kubenswrapper[4923]: I1128 11:10:03.168058 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-g2kmb"
Nov 28 11:10:03 crc kubenswrapper[4923]: E1128 11:10:03.168233 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-g2kmb" podUID="b483d037-b692-45d5-bb83-02e029649100"
Nov 28 11:10:03 crc kubenswrapper[4923]: I1128 11:10:03.175478 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 11:10:03 crc kubenswrapper[4923]: E1128 11:10:03.175683 4923 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 28 11:10:03 crc kubenswrapper[4923]: E1128 11:10:03.175720 4923 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 28 11:10:03 crc kubenswrapper[4923]: E1128 11:10:03.175740 4923 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 28 11:10:03 crc kubenswrapper[4923]: E1128 11:10:03.175814 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-28 11:11:07.175788398 +0000 UTC m=+146.304472638 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 28 11:10:03 crc kubenswrapper[4923]: I1128 11:10:03.179216 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 11:10:03 crc kubenswrapper[4923]: I1128 11:10:03.179277 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 11:10:03 crc kubenswrapper[4923]: I1128 11:10:03.179296 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 11:10:03 crc kubenswrapper[4923]: I1128 11:10:03.179325 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 11:10:03 crc kubenswrapper[4923]: I1128 11:10:03.179353 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:03Z","lastTransitionTime":"2025-11-28T11:10:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 11:10:03 crc kubenswrapper[4923]: I1128 11:10:03.205324 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-68dth_08e03349-56fc-4b2d-93d3-cf2405a4b7ad/ovnkube-controller/3.log"
Nov 28 11:10:03 crc kubenswrapper[4923]: I1128 11:10:03.206417 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-68dth_08e03349-56fc-4b2d-93d3-cf2405a4b7ad/ovnkube-controller/2.log"
Nov 28 11:10:03 crc kubenswrapper[4923]: I1128 11:10:03.211024 4923 generic.go:334] "Generic (PLEG): container finished" podID="08e03349-56fc-4b2d-93d3-cf2405a4b7ad" containerID="38c546a3fbd5195bd0602ef14f92ec2bfd832e3f46cce2b709483a90a97e1611" exitCode=1
Nov 28 11:10:03 crc kubenswrapper[4923]: I1128 11:10:03.211078 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" event={"ID":"08e03349-56fc-4b2d-93d3-cf2405a4b7ad","Type":"ContainerDied","Data":"38c546a3fbd5195bd0602ef14f92ec2bfd832e3f46cce2b709483a90a97e1611"}
Nov 28 11:10:03 crc kubenswrapper[4923]: I1128 11:10:03.211126 4923 scope.go:117] "RemoveContainer" containerID="026b93efaab822fb3d6aee74b0b301389d90f99963ace4d988dc77173ba770ee"
Nov 28 11:10:03 crc kubenswrapper[4923]: I1128 11:10:03.212493 4923 scope.go:117] "RemoveContainer" containerID="38c546a3fbd5195bd0602ef14f92ec2bfd832e3f46cce2b709483a90a97e1611"
Nov 28 11:10:03 crc kubenswrapper[4923]: E1128 11:10:03.212764 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-68dth_openshift-ovn-kubernetes(08e03349-56fc-4b2d-93d3-cf2405a4b7ad)\"" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" podUID="08e03349-56fc-4b2d-93d3-cf2405a4b7ad"
Nov 28 11:10:03 crc kubenswrapper[4923]: I1128 11:10:03.227010 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9qvkm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf32d1c9-4639-48a9-b972-c9ad6daec543\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee259c68571ed9e58d29ab09558dea3cdcc89ebfb898d6f27e896cb0d80665bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnwc6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:03Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9qvkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:10:03Z is after 2025-08-24T17:21:41Z" Nov 28 11:10:03 crc kubenswrapper[4923]: I1128 11:10:03.242200 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-g2kmb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b483d037-b692-45d5-bb83-02e029649100\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmpxf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmpxf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:14Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-g2kmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:10:03Z is after 2025-08-24T17:21:41Z" Nov 28 11:10:03 crc kubenswrapper[4923]: I1128 11:10:03.259965 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdcd87eab93f0216a48bbd6038ca2bc510b7b36f895bf66de15084be62a9a0e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa3a1d3e4297edce49cfd44925fbd1cb0d51752581df9a406042cc1da6f87121\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:10:03Z is after 2025-08-24T17:21:41Z" Nov 28 11:10:03 crc kubenswrapper[4923]: I1128 11:10:03.275579 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0d288688a32f135820030d0816b0e9567100a4732e99c41c8b7f05374c8251f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:10:03Z is after 2025-08-24T17:21:41Z" Nov 28 11:10:03 crc kubenswrapper[4923]: I1128 11:10:03.282497 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:03 crc kubenswrapper[4923]: I1128 11:10:03.282630 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:03 crc kubenswrapper[4923]: I1128 11:10:03.282651 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:03 crc kubenswrapper[4923]: I1128 11:10:03.282680 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:03 crc kubenswrapper[4923]: I1128 11:10:03.282733 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:03Z","lastTransitionTime":"2025-11-28T11:10:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:10:03 crc kubenswrapper[4923]: I1128 11:10:03.296101 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-9gjj9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b5d7899933378350cf0b863d44216aa3d87b7343f144dcab3470ee44370de0a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27143610133e2bc3e2aa453a394a9f65fcdeb97a45221a239dd490029e5a3184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://27143610133e2bc3e2aa453a394a9f65fcdeb97a45221a239dd490029e5a3184\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79f89c182f50622044f3978965cb214c601f6de4cddc96eaa118f532b2864276\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://79f89c182f50622044f3978965cb214c601f6de4cddc96eaa118f532b2864276\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7996a8b1d06ca35a2ee6c89edc2eaa7e45a6084ab54ff0caaa091c763d3cd47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b7996a8b1d06ca35a2ee6c89edc2eaa7e45a6084ab54ff0caaa091c763d3cd47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62d8385e1aa47815f9084d28d70dae899c80019ce59f5725455c594a31c97f22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62d8385e1aa47815f9084d28d70dae899c80019ce59f5725455c594a31c97f22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f6b2e1bc9f8f538d0973d9b1726d2c105d61fcd559df3ab8a2ec77b2d8f44a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f6b2e1bc9f8f538d0973d9b1726d2c105d61fcd559df3ab8a2ec77b2d8f44a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a368daf98912d176b66d5aba37e5e91937fbee8c7bd7ce6658993668c8e1525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9a368daf98912d176b66d5aba37e5e91937fbee8c7bd7ce6658993668c8e1525\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-9gjj9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:10:03Z is after 2025-08-24T17:21:41Z" Nov 28 11:10:03 crc kubenswrapper[4923]: I1128 11:10:03.314053 4923 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"faf07f1a-1aa1-4e4a-b93d-739f0a9f1012\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f7b3757e1d1a5295909db644a475e35e9f9826cd7382a5a3eba86b4a76ac04d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f83e92b35264fccdd516d857e5a574a7156f7615b643691b6f8694daa38089b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8841f44f1d4af0e73960ce1c7ac5a4da352f85f6b3637315faa716d853be3277\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc960423fd7ee0a6231020982f5b932a6a2d7d0515d6f6df50
3d6c5d51b82096\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:10:03Z is after 2025-08-24T17:21:41Z" Nov 28 11:10:03 crc kubenswrapper[4923]: I1128 11:10:03.329358 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3af1089a-5262-4fa0-85fb-9f992ee6274d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://403d762c4ba4c4f3309ef1b447be25f7882da8a2d03b9376711063165438294f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3513d0400c621295e074b54a00fe7f284c38bebd8e7f11315db91fef9a2a4693\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd7
89a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://81443f6c4751860dce1d5ecf0f867a1c9641a989cbfd171e71de418f738108c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81a3db2980eccec7427b48074b3314c31b8471001076f7a7d9cfae435564097e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://81a3db2980eccec7427b48074b3314c31b8471001076f7a7d9cfae435564097e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:10:03Z is after 2025-08-24T17:21:41Z" Nov 28 11:10:03 crc kubenswrapper[4923]: I1128 11:10:03.347063 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-h5s2m" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"84374038-67ce-4dc0-a2c2-6eed9650c604\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://53821c93696c6770adcfbe02308f05bdb9635578bd1dfa8d3201ecf94fa8b37c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://addcc8dd720a66b5089f7fa541a454de2be862cc524d1f8e4c948059ef70e20f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T11:09:46Z\\\",\\\"message\\\":\\\"2025-11-28T11:09:01+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_586be321-5a1b-4320-8bdd-14f453eec838\\\\n2025-11-28T11:09:01+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_586be321-5a1b-4320-8bdd-14f453eec838 to /host/opt/cni/bin/\\\\n2025-11-28T11:09:01Z [verbose] multus-daemon started\\\\n2025-11-28T11:09:01Z [verbose] Readiness Indicator file check\\\\n2025-11-28T11:09:46Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8z7ts\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-h5s2m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:10:03Z is after 2025-08-24T17:21:41Z" Nov 28 11:10:03 crc kubenswrapper[4923]: I1128 11:10:03.361800 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"092566f7-fc7d-4897-a1f2-4ecedcd3058e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5e3ad6f76cbc3a3e771dc55c8711f153c18c1c96798a89e0f20b1ff06041129c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nthb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e0494fbf37786a6c8b1524ab2642c29343c3cfef308a6f0988d59f375d732a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nthb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-bwdth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:10:03Z is after 2025-08-24T17:21:41Z" Nov 28 11:10:03 crc kubenswrapper[4923]: I1128 11:10:03.383416 4923 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ee3c047cb59b98c8394618e6194fc477b983a7039581951378c69698b307ee7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3c01dc5b138b3d245898dd4a01c5e81350afe6fabfe9e0333589cd9439d4017\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88bb4ac52c4706ca3d80080efb31eff071b89651d1a474b4c0c11ed5559ee7a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7b206747c810fe48a3d4269cdf80dce693f2d075510aabb42ef2c6dbbea97e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7489bfb225a27d96b70124820fb1924580c08b3355ef948335f881d7646a8a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bc7c6e0b076f04ba7810c82578147a9a3af59d3393e8effb111c299583aa6de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38c546a3fbd5195bd0602ef14f92ec2bfd832e3f46cce2b709483a90a97e1611\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://026b93efaab822fb3d6aee74b0b301389d90f99963ace4d988dc77173ba770ee\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T11:09:31Z\\\",\\\"message\\\":\\\"c771-4738-8709-09636387cb00}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {7e8bb06a-06a5-45bc-a752-26a17d322811}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:61897e97-c771-4738-8709-09636387cb00}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {c02bd945-d57b-49ff-9cd3-202ed3574b26}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.4 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {43933d5e-3c3b-4ff8-8926-04ac25de450e}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:nat Mutator:insert Value:{GoSet:[{GoUUID:43933d5e-3c3b-4ff8-8926-04ac25de450e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {e3c4661a-36a6-47f0-a6c0-a4ee741f2224}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1128 11:09:31.089339 6468 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1128 11:09:31.089404 6468 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to 
create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:30Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38c546a3fbd5195bd0602ef14f92ec2bfd832e3f46cce2b709483a90a97e1611\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T11:10:03Z\\\",\\\"message\\\":\\\"4 services_controller.go:452] Built service openshift-kube-scheduler/scheduler per-node LB for network=default: []services.LB{}\\\\nI1128 11:10:02.680964 6804 services_controller.go:453] Built service openshift-kube-scheduler/scheduler template LB for network=default: []services.LB{}\\\\nI1128 11:10:02.680979 6804 services_controller.go:454] Service openshift-kube-scheduler/scheduler for network=default has 1 cluster-wide, 0 per-node configs, 0 template configs, making 1 (cluster) 0 (per node) and 0 (template) load balancers\\\\nI1128 11:10:02.680924 6804 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-kube-scheduler-operator/metrics]} name:Service_openshift-kube-scheduler-operator/metrics_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.233:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {1dc899db-4498-4b7a-8437-861940b962e7}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF1128 11:10:02.681006 6804 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to 
create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T11:10:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c2e3f2c83ec1b586a9478fb8d23caccab36a0fe08a3f0907a7b0cb2e67af65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d
1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-68dth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:10:03Z is after 2025-08-24T17:21:41Z" Nov 28 11:10:03 crc kubenswrapper[4923]: I1128 11:10:03.385590 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:03 crc kubenswrapper[4923]: I1128 11:10:03.385698 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:03 crc kubenswrapper[4923]: I1128 11:10:03.385724 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:03 crc kubenswrapper[4923]: I1128 11:10:03.385753 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:03 crc kubenswrapper[4923]: I1128 11:10:03.385774 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:03Z","lastTransitionTime":"2025-11-28T11:10:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:10:03 crc kubenswrapper[4923]: I1128 11:10:03.399426 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-8klhg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b1f111d9-e2b2-44b9-9592-bc5d4fef01f0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69bb796e49d5ca00e472f027f1443316695a4e243faff1eec26bc13d67bbc60a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vq594\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f90a5608dca4e71887975960683dda08b1b5e01f598af251663a968bb7fe56c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vq594\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:13Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-8klhg\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:10:03Z is after 2025-08-24T17:21:41Z" Nov 28 11:10:03 crc kubenswrapper[4923]: I1128 11:10:03.418968 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c83fada-ddb5-4acd-99c4-74d9f42e6250\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eece6b2154126c64202c6cb5a8b2953275ed2dc75e76fef6aaf2c4b82a1979f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28093276aebb4751d979649c4ced86f500308d0d4dde397771c0e1e968250ec8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28ae91e6197ea506c337abdbce14a048856e6bda9b35c5de922904c26bc96a54\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\
\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb7df64556e877b9dd56be5e97103abc8aa8b28a43b4a5389d0f6e2489057cf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc06f87c8ea0744810e2b9cb7ff8bb529fc1b2133ab79d12eb8e6129accd3e18\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"-12-28 11:08:43 +0000 UTC (now=2025-11-28 11:08:59.275700323 +0000 UTC))\\\\\\\"\\\\nI1128 11:08:59.275749 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1128 11:08:59.275786 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1128 11:08:59.275797 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI1128 11:08:59.275809 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1128 11:08:59.275835 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1128 11:08:59.275852 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764328134\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764328133\\\\\\\\\\\\\\\" (2025-11-28 10:08:53 +0000 UTC to 2026-11-28 10:08:53 +0000 UTC (now=2025-11-28 11:08:59.275832266 +0000 UTC))\\\\\\\"\\\\nI1128 11:08:59.275869 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1128 11:08:59.275889 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1128 11:08:59.275902 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1128 11:08:59.275909 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1128 11:08:59.275921 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1128 11:08:59.275909 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2723273528/tls.crt::/tmp/serving-cert-2723273528/tls.key\\\\\\\"\\\\nF1128 11:08:59.278169 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:43Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c6f085f1fd5a1ed6abe0727d6a94c95fb1b97a9f00a0dc157f62f68698c25ba9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:10:03Z is after 2025-08-24T17:21:41Z" Nov 28 11:10:03 crc kubenswrapper[4923]: I1128 11:10:03.437149 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c1e1dcf5efd54a3e3546460813ddc68dae027e669a19eeef6af7246b385ed21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:10:03Z is after 2025-08-24T17:21:41Z" Nov 28 11:10:03 crc kubenswrapper[4923]: I1128 11:10:03.455987 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:10:03Z is after 2025-08-24T17:21:41Z" Nov 28 11:10:03 crc kubenswrapper[4923]: I1128 11:10:03.474388 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:10:03Z is after 2025-08-24T17:21:41Z" Nov 28 11:10:03 crc kubenswrapper[4923]: I1128 11:10:03.489498 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:03 crc kubenswrapper[4923]: I1128 11:10:03.489550 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:03 crc kubenswrapper[4923]: I1128 11:10:03.489568 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:03 crc kubenswrapper[4923]: I1128 11:10:03.489594 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:03 crc kubenswrapper[4923]: I1128 11:10:03.489612 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:03Z","lastTransitionTime":"2025-11-28T11:10:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:10:03 crc kubenswrapper[4923]: I1128 11:10:03.494292 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:10:03Z is after 2025-08-24T17:21:41Z" Nov 28 11:10:03 crc kubenswrapper[4923]: I1128 11:10:03.508545 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-766k2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"69fcf39a-3416-4733-a55a-043d5286f8ac\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14683c7234bd497157ffe1097cd1eee097e5dd0a9e53a3e39813bc75890961b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dnr6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-766k2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:10:03Z is after 2025-08-24T17:21:41Z" Nov 28 11:10:03 crc kubenswrapper[4923]: I1128 11:10:03.592415 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:03 crc kubenswrapper[4923]: I1128 11:10:03.592604 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:03 crc kubenswrapper[4923]: I1128 11:10:03.592625 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:03 crc kubenswrapper[4923]: I1128 11:10:03.592650 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:03 crc kubenswrapper[4923]: I1128 11:10:03.592670 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:03Z","lastTransitionTime":"2025-11-28T11:10:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 11:10:03 crc kubenswrapper[4923]: I1128 11:10:03.695448 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 11:10:03 crc kubenswrapper[4923]: I1128 11:10:03.695505 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 11:10:03 crc kubenswrapper[4923]: I1128 11:10:03.695521 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 11:10:03 crc kubenswrapper[4923]: I1128 11:10:03.695543 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 11:10:03 crc kubenswrapper[4923]: I1128 11:10:03.695559 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:03Z","lastTransitionTime":"2025-11-28T11:10:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 11:10:03 crc kubenswrapper[4923]: I1128 11:10:03.798750 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 11:10:03 crc kubenswrapper[4923]: I1128 11:10:03.798834 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 11:10:03 crc kubenswrapper[4923]: I1128 11:10:03.798853 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 11:10:03 crc kubenswrapper[4923]: I1128 11:10:03.798878 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 11:10:03 crc kubenswrapper[4923]: I1128 11:10:03.798899 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:03Z","lastTransitionTime":"2025-11-28T11:10:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 11:10:03 crc kubenswrapper[4923]: I1128 11:10:03.901227 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 11:10:03 crc kubenswrapper[4923]: I1128 11:10:03.901293 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 11:10:03 crc kubenswrapper[4923]: I1128 11:10:03.901314 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 11:10:03 crc kubenswrapper[4923]: I1128 11:10:03.901339 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 11:10:03 crc kubenswrapper[4923]: I1128 11:10:03.901357 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:03Z","lastTransitionTime":"2025-11-28T11:10:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 11:10:04 crc kubenswrapper[4923]: I1128 11:10:04.004296 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 11:10:04 crc kubenswrapper[4923]: I1128 11:10:04.004341 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 11:10:04 crc kubenswrapper[4923]: I1128 11:10:04.004357 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 11:10:04 crc kubenswrapper[4923]: I1128 11:10:04.004378 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 11:10:04 crc kubenswrapper[4923]: I1128 11:10:04.004395 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:04Z","lastTransitionTime":"2025-11-28T11:10:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 11:10:04 crc kubenswrapper[4923]: I1128 11:10:04.107713 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 11:10:04 crc kubenswrapper[4923]: I1128 11:10:04.107770 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 11:10:04 crc kubenswrapper[4923]: I1128 11:10:04.107794 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 11:10:04 crc kubenswrapper[4923]: I1128 11:10:04.107821 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 11:10:04 crc kubenswrapper[4923]: I1128 11:10:04.107844 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:04Z","lastTransitionTime":"2025-11-28T11:10:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 11:10:04 crc kubenswrapper[4923]: I1128 11:10:04.168398 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 11:10:04 crc kubenswrapper[4923]: I1128 11:10:04.168416 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 11:10:04 crc kubenswrapper[4923]: I1128 11:10:04.168416 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 11:10:04 crc kubenswrapper[4923]: E1128 11:10:04.168813 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 11:10:04 crc kubenswrapper[4923]: E1128 11:10:04.168859 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 11:10:04 crc kubenswrapper[4923]: E1128 11:10:04.168611 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 11:10:04 crc kubenswrapper[4923]: I1128 11:10:04.210336 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 11:10:04 crc kubenswrapper[4923]: I1128 11:10:04.210398 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 11:10:04 crc kubenswrapper[4923]: I1128 11:10:04.210421 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 11:10:04 crc kubenswrapper[4923]: I1128 11:10:04.210449 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 11:10:04 crc kubenswrapper[4923]: I1128 11:10:04.210470 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:04Z","lastTransitionTime":"2025-11-28T11:10:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:10:04 crc kubenswrapper[4923]: I1128 11:10:04.216771 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-68dth_08e03349-56fc-4b2d-93d3-cf2405a4b7ad/ovnkube-controller/3.log" Nov 28 11:10:04 crc kubenswrapper[4923]: I1128 11:10:04.222202 4923 scope.go:117] "RemoveContainer" containerID="38c546a3fbd5195bd0602ef14f92ec2bfd832e3f46cce2b709483a90a97e1611" Nov 28 11:10:04 crc kubenswrapper[4923]: E1128 11:10:04.222450 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-68dth_openshift-ovn-kubernetes(08e03349-56fc-4b2d-93d3-cf2405a4b7ad)\"" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" podUID="08e03349-56fc-4b2d-93d3-cf2405a4b7ad" Nov 28 11:10:04 crc kubenswrapper[4923]: I1128 11:10:04.237739 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-766k2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69fcf39a-3416-4733-a55a-043d5286f8ac\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14683c7234bd497157ffe1097cd1eee097e5dd0a9e53a3e39813bc75890961b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dnr6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-766k2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:10:04Z is after 
2025-08-24T17:21:41Z" Nov 28 11:10:04 crc kubenswrapper[4923]: I1128 11:10:04.253892 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-g2kmb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b483d037-b692-45d5-bb83-02e029649100\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmpxf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmpxf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:14Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-g2kmb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:10:04Z is after 2025-08-24T17:21:41Z" Nov 28 11:10:04 crc kubenswrapper[4923]: I1128 11:10:04.272492 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdcd87eab93f0216a48bbd6038ca2bc510b7b36f895bf66de15084be62a9a0e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa3a1d3e4297edce49cfd44925fbd1cb0d51752581df9a406042cc1da6f87121\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:10:04Z is after 2025-08-24T17:21:41Z" Nov 28 11:10:04 crc kubenswrapper[4923]: I1128 11:10:04.291122 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0d288688a32f135820030d0816b0e9567100a4732e99c41c8b7f05374c8251f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:10:04Z is after 2025-08-24T17:21:41Z" Nov 28 11:10:04 crc kubenswrapper[4923]: I1128 11:10:04.313840 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:04 crc kubenswrapper[4923]: I1128 11:10:04.313896 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:04 crc kubenswrapper[4923]: I1128 11:10:04.313913 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:04 crc kubenswrapper[4923]: I1128 11:10:04.313961 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:04 crc kubenswrapper[4923]: I1128 11:10:04.313978 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:04Z","lastTransitionTime":"2025-11-28T11:10:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:10:04 crc kubenswrapper[4923]: I1128 11:10:04.315660 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-9gjj9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2de4c67-b7cc-4d54-bc0a-3e8cb9c13093\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b5d7899933378350cf0b863d44216aa3d87b7343f144dcab3470ee44370de0a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27143610133e2bc3e2aa453a394a9f65fcdeb97a45221a239dd490029e5a3184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://27143610133e2bc3e2aa453a394a9f65fcdeb97a45221a239dd490029e5a3184\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79f89c182f50622044f3978965cb214c601f6de4cddc96eaa118f532b2864276\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://79f89c182f50622044f3978965cb214c601f6de4cddc96eaa118f532b2864276\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7996a8b1d06ca35a2ee6c89edc2eaa7e45a6084ab54ff0caaa091c763d3cd47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b7996a8b1d06ca35a2ee6c89edc2eaa7e45a6084ab54ff0caaa091c763d3cd47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62d8385e1aa47815f9084d28d70dae899c80019ce59f5725455c594a31c97f22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62d8385e1aa47815f9084d28d70dae899c80019ce59f5725455c594a31c97f22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f6b2e1bc9f8f538d0973d9b1726d2c105d61fcd559df3ab8a2ec77b2d8f44a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f6b2e1bc9f8f538d0973d9b1726d2c105d61fcd559df3ab8a2ec77b2d8f44a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a368daf98912d176b66d5aba37e5e91937fbee8c7bd7ce6658993668c8e1525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9a368daf98912d176b66d5aba37e5e91937fbee8c7bd7ce6658993668c8e1525\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4j55d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-9gjj9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:10:04Z is after 2025-08-24T17:21:41Z" Nov 28 11:10:04 crc kubenswrapper[4923]: I1128 11:10:04.330621 4923 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-image-registry/node-ca-9qvkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cf32d1c9-4639-48a9-b972-c9ad6daec543\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee259c68571ed9e58d29ab09558dea3cdcc89ebfb898d6f27e896cb0d80665bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnwc6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:03Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9qvkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:10:04Z is after 2025-08-24T17:21:41Z" Nov 28 11:10:04 crc kubenswrapper[4923]: I1128 11:10:04.353356 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"faf07f1a-1aa1-4e4a-b93d-739f0a9f1012\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f7b3757e1d1a5295909db644a475e35e9f9826cd7382a5a3eba86b4a76ac04d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f83e92b35264fccdd516d857e5a574a7156f7615b643691b6f8694daa38089b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8841f44f1d4af0e73960ce1c7ac5a4da352f85f6b3637315faa716d853be3277\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc960423fd7ee0a6231020982f5b932a6a2d7d0515d6f6df503d6c5d51b82096\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:10:04Z is after 2025-08-24T17:21:41Z" Nov 28 11:10:04 crc kubenswrapper[4923]: I1128 11:10:04.372499 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3af1089a-5262-4fa0-85fb-9f992ee6274d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://403d762c4ba4c4f3309ef1b447be25f7882da8a2d03b9376711063165438294f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3513d0400c621295e074b54a00fe7f284c38bebd8e7f11315db91fef9a2a4693\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://81443f6c4751860dce1d5ecf0f867a1c9641a989cbfd171e71de418f738108c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81a3db2980eccec7427b48074b3314c31b8471001076f7a7d9cfae435564097e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://81a3db2980eccec7427b48074b3314c31b8471001076f7a7d9cfae435564097e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:10:04Z is after 2025-08-24T17:21:41Z" Nov 28 11:10:04 crc kubenswrapper[4923]: I1128 11:10:04.399248 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-h5s2m" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"84374038-67ce-4dc0-a2c2-6eed9650c604\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://53821c93696c6770adcfbe02308f05bdb9635578bd1dfa8d3201ecf94fa8b37c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://addcc8dd720a66b5089f7fa541a454de2be862cc524d1f8e4c948059ef70e20f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T11:09:46Z\\\",\\\"message\\\":\\\"2025-11-28T11:09:01+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_586be321-5a1b-4320-8bdd-14f453eec838\\\\n2025-11-28T11:09:01+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_586be321-5a1b-4320-8bdd-14f453eec838 to /host/opt/cni/bin/\\\\n2025-11-28T11:09:01Z [verbose] multus-daemon started\\\\n2025-11-28T11:09:01Z [verbose] Readiness Indicator file check\\\\n2025-11-28T11:09:46Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8z7ts\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-h5s2m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:10:04Z is after 2025-08-24T17:21:41Z" Nov 28 11:10:04 crc kubenswrapper[4923]: I1128 11:10:04.417346 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:04 crc kubenswrapper[4923]: I1128 11:10:04.417408 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:04 crc kubenswrapper[4923]: I1128 11:10:04.417431 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:04 crc kubenswrapper[4923]: I1128 11:10:04.417462 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:04 crc kubenswrapper[4923]: I1128 11:10:04.417485 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:04Z","lastTransitionTime":"2025-11-28T11:10:04Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:10:04 crc kubenswrapper[4923]: I1128 11:10:04.430778 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ee3c047cb59b98c8394618e6194fc477b983a7039581951378c69698b307ee7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3c01dc5b138b3d245898dd4a01c5e81350afe6fabfe9e0333589cd9439d4017\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/s
ecrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88bb4ac52c4706ca3d80080efb31eff071b89651d1a474b4c0c11ed5559ee7a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7b206747c810fe48a3d4269cdf80dce693f2d075510aabb42ef2c6dbbea97e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7489bfb225a27d96b70124820fb1924580c08b3355ef948335f881d7646a8a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bc7c6e0b076f04ba7810c82578147a9a3af59d3393e8effb111c299583aa6de\\
\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38c546a3fbd5195bd0602ef14f92ec2bfd832e3f46cce2b709483a90a97e1611\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38c546a3fbd5195bd0602ef14f92ec2bfd832e3f46cce2b709483a90a97e1611\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T11:10:03Z\\\",\\\"message\\\":\\\"4 services_controller.go:452] Built service openshift-kube-scheduler/scheduler per-node LB for network=default: []services.LB{}\\\\nI1128 11:10:02.680964 6804 services_controller.go:453] Built service openshift-kube-scheduler/scheduler template LB for network=default: []services.LB{}\\\\nI1128 11:10:02.680979 6804 services_controller.go:454] Service openshift-kube-scheduler/scheduler for network=default has 1 cluster-wide, 0 per-node configs, 0 template configs, making 1 (cluster) 0 (per node) and 0 (template) load balancers\\\\nI1128 11:10:02.680924 6804 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-kube-scheduler-operator/metrics]} name:Service_openshift-kube-scheduler-operator/metrics_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.233:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {1dc899db-4498-4b7a-8437-861940b962e7}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF1128 11:10:02.681006 6804 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to 
create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T11:10:01Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-68dth_openshift-ovn-kubernetes(08e03349-56fc-4b2d-93d3-cf2405a4b7ad)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c2e3f2c83ec1b586a9478fb8d23caccab36a0fe08a3f0907a7b0cb2e67af65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recurs
iveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qd9rd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-68dth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:10:04Z is after 2025-08-24T17:21:41Z" Nov 28 11:10:04 crc kubenswrapper[4923]: I1128 11:10:04.444785 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-8klhg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b1f111d9-e2b2-44b9-9592-bc5d4fef01f0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69bb796e49d5ca00e472f027f1443316695a4e243faff1eec26bc13d67bbc60a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vq594\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f90a5608dca4e71887975960683dda08b1b5e01f598af251663a968bb7fe56c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vq594\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:13Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-8klhg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:10:04Z is after 2025-08-24T17:21:41Z" Nov 28 
11:10:04 crc kubenswrapper[4923]: I1128 11:10:04.465490 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c83fada-ddb5-4acd-99c4-74d9f42e6250\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eece6b2154126c64202c6cb5a8b2953275ed2dc75e76fef6aaf2c4b82a1979f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28093276aebb4751d979649c4ced86f500308d0d4dde397771c0e1e968250ec8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28ae91e6197ea506c337abdbce14a048856e6bda9b35c5de922904c26bc96a54\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\
\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb7df64556e877b9dd56be5e97103abc8aa8b28a43b4a5389d0f6e2489057cf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc06f87c8ea0744810e2b9cb7ff8bb529fc1b2133ab79d12eb8e6129accd3e18\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"-12-28 11:08:43 +0000 UTC (now=2025-11-28 11:08:59.275700323 +0000 UTC))\\\\\\\"\\\\nI1128 11:08:59.275749 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1128 11:08:59.275786 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1128 11:08:59.275797 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI1128 11:08:59.275809 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1128 11:08:59.275835 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1128 11:08:59.275852 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764328134\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764328133\\\\\\\\\\\\\\\" (2025-11-28 10:08:53 +0000 UTC to 2026-11-28 10:08:53 +0000 UTC (now=2025-11-28 11:08:59.275832266 +0000 UTC))\\\\\\\"\\\\nI1128 11:08:59.275869 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1128 11:08:59.275889 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1128 11:08:59.275902 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1128 11:08:59.275909 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1128 11:08:59.275921 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1128 11:08:59.275909 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2723273528/tls.crt::/tmp/serving-cert-2723273528/tls.key\\\\\\\"\\\\nF1128 11:08:59.278169 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:43Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c6f085f1fd5a1ed6abe0727d6a94c95fb1b97a9f00a0dc157f62f68698c25ba9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T11:08:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T11:08:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:08:41Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:10:04Z is after 2025-08-24T17:21:41Z" Nov 28 11:10:04 crc kubenswrapper[4923]: I1128 11:10:04.482280 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c1e1dcf5efd54a3e3546460813ddc68dae027e669a19eeef6af7246b385ed21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:10:04Z is after 2025-08-24T17:21:41Z" Nov 28 11:10:04 crc kubenswrapper[4923]: I1128 11:10:04.501909 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:10:04Z is after 2025-08-24T17:21:41Z" Nov 28 11:10:04 crc kubenswrapper[4923]: I1128 11:10:04.520184 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:10:04Z is after 2025-08-24T17:21:41Z" Nov 28 11:10:04 crc kubenswrapper[4923]: I1128 11:10:04.520303 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:04 crc kubenswrapper[4923]: I1128 11:10:04.520344 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:04 crc kubenswrapper[4923]: I1128 11:10:04.520408 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:04 crc kubenswrapper[4923]: I1128 11:10:04.520449 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:04 crc kubenswrapper[4923]: I1128 11:10:04.520471 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:04Z","lastTransitionTime":"2025-11-28T11:10:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:10:04 crc kubenswrapper[4923]: I1128 11:10:04.539115 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T11:08:59Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:10:04Z is after 2025-08-24T17:21:41Z" Nov 28 11:10:04 crc kubenswrapper[4923]: I1128 11:10:04.557314 4923 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"092566f7-fc7d-4897-a1f2-4ecedcd3058e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T11:09:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5e3ad6f76cbc3a3e771dc55c8711f153c18c1c96798a89e0f20b1ff06041129c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nthb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e0494fbf37786a6c8b1524ab2642c29343c3cfef308a6f0988d59f375d732a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T11:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nthb2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T11:09:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-bwdth\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T11:10:04Z is after 2025-08-24T17:21:41Z" Nov 28 11:10:04 crc kubenswrapper[4923]: I1128 11:10:04.623769 4923 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:04 crc kubenswrapper[4923]: I1128 11:10:04.623825 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:04 crc kubenswrapper[4923]: I1128 11:10:04.623844 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:04 crc kubenswrapper[4923]: I1128 11:10:04.623869 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:04 crc kubenswrapper[4923]: I1128 11:10:04.623886 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:04Z","lastTransitionTime":"2025-11-28T11:10:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:10:04 crc kubenswrapper[4923]: I1128 11:10:04.727039 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:04 crc kubenswrapper[4923]: I1128 11:10:04.727342 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:04 crc kubenswrapper[4923]: I1128 11:10:04.727363 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:04 crc kubenswrapper[4923]: I1128 11:10:04.727388 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:04 crc kubenswrapper[4923]: I1128 11:10:04.727407 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:04Z","lastTransitionTime":"2025-11-28T11:10:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:10:04 crc kubenswrapper[4923]: I1128 11:10:04.829980 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:04 crc kubenswrapper[4923]: I1128 11:10:04.830035 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:04 crc kubenswrapper[4923]: I1128 11:10:04.830053 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:04 crc kubenswrapper[4923]: I1128 11:10:04.830075 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:04 crc kubenswrapper[4923]: I1128 11:10:04.830092 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:04Z","lastTransitionTime":"2025-11-28T11:10:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:10:04 crc kubenswrapper[4923]: I1128 11:10:04.932383 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:04 crc kubenswrapper[4923]: I1128 11:10:04.932437 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:04 crc kubenswrapper[4923]: I1128 11:10:04.932453 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:04 crc kubenswrapper[4923]: I1128 11:10:04.932476 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:04 crc kubenswrapper[4923]: I1128 11:10:04.932493 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:04Z","lastTransitionTime":"2025-11-28T11:10:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:10:05 crc kubenswrapper[4923]: I1128 11:10:05.034848 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:05 crc kubenswrapper[4923]: I1128 11:10:05.034921 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:05 crc kubenswrapper[4923]: I1128 11:10:05.034973 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:05 crc kubenswrapper[4923]: I1128 11:10:05.035004 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:05 crc kubenswrapper[4923]: I1128 11:10:05.035031 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:05Z","lastTransitionTime":"2025-11-28T11:10:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:10:05 crc kubenswrapper[4923]: I1128 11:10:05.138675 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:05 crc kubenswrapper[4923]: I1128 11:10:05.138774 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:05 crc kubenswrapper[4923]: I1128 11:10:05.138792 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:05 crc kubenswrapper[4923]: I1128 11:10:05.138817 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:05 crc kubenswrapper[4923]: I1128 11:10:05.138836 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:05Z","lastTransitionTime":"2025-11-28T11:10:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:10:05 crc kubenswrapper[4923]: I1128 11:10:05.172721 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-g2kmb" Nov 28 11:10:05 crc kubenswrapper[4923]: E1128 11:10:05.172907 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-g2kmb" podUID="b483d037-b692-45d5-bb83-02e029649100" Nov 28 11:10:05 crc kubenswrapper[4923]: I1128 11:10:05.241405 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:05 crc kubenswrapper[4923]: I1128 11:10:05.241437 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:05 crc kubenswrapper[4923]: I1128 11:10:05.241446 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:05 crc kubenswrapper[4923]: I1128 11:10:05.241465 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:05 crc kubenswrapper[4923]: I1128 11:10:05.241475 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:05Z","lastTransitionTime":"2025-11-28T11:10:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:10:05 crc kubenswrapper[4923]: I1128 11:10:05.344460 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:05 crc kubenswrapper[4923]: I1128 11:10:05.344532 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:05 crc kubenswrapper[4923]: I1128 11:10:05.344550 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:05 crc kubenswrapper[4923]: I1128 11:10:05.344578 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:05 crc kubenswrapper[4923]: I1128 11:10:05.344598 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:05Z","lastTransitionTime":"2025-11-28T11:10:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:10:05 crc kubenswrapper[4923]: I1128 11:10:05.447653 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:05 crc kubenswrapper[4923]: I1128 11:10:05.447703 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:05 crc kubenswrapper[4923]: I1128 11:10:05.447720 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:05 crc kubenswrapper[4923]: I1128 11:10:05.447746 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:05 crc kubenswrapper[4923]: I1128 11:10:05.447763 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:05Z","lastTransitionTime":"2025-11-28T11:10:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:10:05 crc kubenswrapper[4923]: I1128 11:10:05.550195 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:05 crc kubenswrapper[4923]: I1128 11:10:05.550270 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:05 crc kubenswrapper[4923]: I1128 11:10:05.550287 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:05 crc kubenswrapper[4923]: I1128 11:10:05.550314 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:05 crc kubenswrapper[4923]: I1128 11:10:05.550334 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:05Z","lastTransitionTime":"2025-11-28T11:10:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:10:05 crc kubenswrapper[4923]: I1128 11:10:05.653008 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:05 crc kubenswrapper[4923]: I1128 11:10:05.653130 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:05 crc kubenswrapper[4923]: I1128 11:10:05.653181 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:05 crc kubenswrapper[4923]: I1128 11:10:05.653209 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:05 crc kubenswrapper[4923]: I1128 11:10:05.653232 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:05Z","lastTransitionTime":"2025-11-28T11:10:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:10:05 crc kubenswrapper[4923]: I1128 11:10:05.756490 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:05 crc kubenswrapper[4923]: I1128 11:10:05.756595 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:05 crc kubenswrapper[4923]: I1128 11:10:05.756616 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:05 crc kubenswrapper[4923]: I1128 11:10:05.756641 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:05 crc kubenswrapper[4923]: I1128 11:10:05.756660 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:05Z","lastTransitionTime":"2025-11-28T11:10:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:10:05 crc kubenswrapper[4923]: I1128 11:10:05.859190 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:05 crc kubenswrapper[4923]: I1128 11:10:05.859224 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:05 crc kubenswrapper[4923]: I1128 11:10:05.859234 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:05 crc kubenswrapper[4923]: I1128 11:10:05.859247 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:05 crc kubenswrapper[4923]: I1128 11:10:05.859255 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:05Z","lastTransitionTime":"2025-11-28T11:10:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:10:05 crc kubenswrapper[4923]: I1128 11:10:05.961898 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:05 crc kubenswrapper[4923]: I1128 11:10:05.962030 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:05 crc kubenswrapper[4923]: I1128 11:10:05.962066 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:05 crc kubenswrapper[4923]: I1128 11:10:05.962099 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:05 crc kubenswrapper[4923]: I1128 11:10:05.962126 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:05Z","lastTransitionTime":"2025-11-28T11:10:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:10:06 crc kubenswrapper[4923]: I1128 11:10:06.065089 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:06 crc kubenswrapper[4923]: I1128 11:10:06.065145 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:06 crc kubenswrapper[4923]: I1128 11:10:06.065167 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:06 crc kubenswrapper[4923]: I1128 11:10:06.065198 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:06 crc kubenswrapper[4923]: I1128 11:10:06.065220 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:06Z","lastTransitionTime":"2025-11-28T11:10:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:10:06 crc kubenswrapper[4923]: I1128 11:10:06.167644 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 11:10:06 crc kubenswrapper[4923]: I1128 11:10:06.167705 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 11:10:06 crc kubenswrapper[4923]: I1128 11:10:06.167708 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 11:10:06 crc kubenswrapper[4923]: E1128 11:10:06.167797 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 11:10:06 crc kubenswrapper[4923]: I1128 11:10:06.168140 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:06 crc kubenswrapper[4923]: I1128 11:10:06.168188 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:06 crc kubenswrapper[4923]: I1128 11:10:06.168207 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:06 crc kubenswrapper[4923]: I1128 11:10:06.168232 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:06 crc kubenswrapper[4923]: E1128 11:10:06.168190 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 11:10:06 crc kubenswrapper[4923]: I1128 11:10:06.168249 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:06Z","lastTransitionTime":"2025-11-28T11:10:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:10:06 crc kubenswrapper[4923]: E1128 11:10:06.168325 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 11:10:06 crc kubenswrapper[4923]: I1128 11:10:06.270850 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:06 crc kubenswrapper[4923]: I1128 11:10:06.270904 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:06 crc kubenswrapper[4923]: I1128 11:10:06.270914 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:06 crc kubenswrapper[4923]: I1128 11:10:06.270927 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:06 crc kubenswrapper[4923]: I1128 11:10:06.270956 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:06Z","lastTransitionTime":"2025-11-28T11:10:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:10:06 crc kubenswrapper[4923]: I1128 11:10:06.374314 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:06 crc kubenswrapper[4923]: I1128 11:10:06.374432 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:06 crc kubenswrapper[4923]: I1128 11:10:06.374452 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:06 crc kubenswrapper[4923]: I1128 11:10:06.374519 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:06 crc kubenswrapper[4923]: I1128 11:10:06.374538 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:06Z","lastTransitionTime":"2025-11-28T11:10:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:10:06 crc kubenswrapper[4923]: I1128 11:10:06.477001 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:06 crc kubenswrapper[4923]: I1128 11:10:06.477042 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:06 crc kubenswrapper[4923]: I1128 11:10:06.477054 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:06 crc kubenswrapper[4923]: I1128 11:10:06.477107 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:06 crc kubenswrapper[4923]: I1128 11:10:06.477123 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:06Z","lastTransitionTime":"2025-11-28T11:10:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:10:06 crc kubenswrapper[4923]: I1128 11:10:06.582059 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:06 crc kubenswrapper[4923]: I1128 11:10:06.582134 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:06 crc kubenswrapper[4923]: I1128 11:10:06.582156 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:06 crc kubenswrapper[4923]: I1128 11:10:06.582195 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:06 crc kubenswrapper[4923]: I1128 11:10:06.582219 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:06Z","lastTransitionTime":"2025-11-28T11:10:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:10:06 crc kubenswrapper[4923]: I1128 11:10:06.686489 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:06 crc kubenswrapper[4923]: I1128 11:10:06.686546 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:06 crc kubenswrapper[4923]: I1128 11:10:06.686570 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:06 crc kubenswrapper[4923]: I1128 11:10:06.686597 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:06 crc kubenswrapper[4923]: I1128 11:10:06.686651 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:06Z","lastTransitionTime":"2025-11-28T11:10:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:10:06 crc kubenswrapper[4923]: I1128 11:10:06.789421 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:06 crc kubenswrapper[4923]: I1128 11:10:06.789503 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:06 crc kubenswrapper[4923]: I1128 11:10:06.789540 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:06 crc kubenswrapper[4923]: I1128 11:10:06.789571 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:06 crc kubenswrapper[4923]: I1128 11:10:06.789593 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:06Z","lastTransitionTime":"2025-11-28T11:10:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:10:06 crc kubenswrapper[4923]: I1128 11:10:06.892641 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:06 crc kubenswrapper[4923]: I1128 11:10:06.892705 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:06 crc kubenswrapper[4923]: I1128 11:10:06.892742 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:06 crc kubenswrapper[4923]: I1128 11:10:06.892771 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:06 crc kubenswrapper[4923]: I1128 11:10:06.892793 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:06Z","lastTransitionTime":"2025-11-28T11:10:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:10:06 crc kubenswrapper[4923]: I1128 11:10:06.995297 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:06 crc kubenswrapper[4923]: I1128 11:10:06.995355 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:06 crc kubenswrapper[4923]: I1128 11:10:06.995372 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:06 crc kubenswrapper[4923]: I1128 11:10:06.995397 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:06 crc kubenswrapper[4923]: I1128 11:10:06.995455 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:06Z","lastTransitionTime":"2025-11-28T11:10:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:10:07 crc kubenswrapper[4923]: I1128 11:10:07.098436 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:07 crc kubenswrapper[4923]: I1128 11:10:07.098504 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:07 crc kubenswrapper[4923]: I1128 11:10:07.098526 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:07 crc kubenswrapper[4923]: I1128 11:10:07.098555 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:07 crc kubenswrapper[4923]: I1128 11:10:07.098578 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:07Z","lastTransitionTime":"2025-11-28T11:10:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:10:07 crc kubenswrapper[4923]: I1128 11:10:07.168452 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-g2kmb" Nov 28 11:10:07 crc kubenswrapper[4923]: E1128 11:10:07.169104 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-g2kmb" podUID="b483d037-b692-45d5-bb83-02e029649100" Nov 28 11:10:07 crc kubenswrapper[4923]: I1128 11:10:07.201288 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:07 crc kubenswrapper[4923]: I1128 11:10:07.201325 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:07 crc kubenswrapper[4923]: I1128 11:10:07.201335 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:07 crc kubenswrapper[4923]: I1128 11:10:07.201350 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:07 crc kubenswrapper[4923]: I1128 11:10:07.201363 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:07Z","lastTransitionTime":"2025-11-28T11:10:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:10:07 crc kubenswrapper[4923]: I1128 11:10:07.304841 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:07 crc kubenswrapper[4923]: I1128 11:10:07.305005 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:07 crc kubenswrapper[4923]: I1128 11:10:07.305034 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:07 crc kubenswrapper[4923]: I1128 11:10:07.305057 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:07 crc kubenswrapper[4923]: I1128 11:10:07.305075 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:07Z","lastTransitionTime":"2025-11-28T11:10:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:10:07 crc kubenswrapper[4923]: I1128 11:10:07.408658 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:07 crc kubenswrapper[4923]: I1128 11:10:07.408719 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:07 crc kubenswrapper[4923]: I1128 11:10:07.408736 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:07 crc kubenswrapper[4923]: I1128 11:10:07.408762 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:07 crc kubenswrapper[4923]: I1128 11:10:07.408779 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:07Z","lastTransitionTime":"2025-11-28T11:10:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:10:07 crc kubenswrapper[4923]: I1128 11:10:07.512295 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:07 crc kubenswrapper[4923]: I1128 11:10:07.512354 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:07 crc kubenswrapper[4923]: I1128 11:10:07.512372 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:07 crc kubenswrapper[4923]: I1128 11:10:07.512394 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:07 crc kubenswrapper[4923]: I1128 11:10:07.512410 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:07Z","lastTransitionTime":"2025-11-28T11:10:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:10:07 crc kubenswrapper[4923]: I1128 11:10:07.615504 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:07 crc kubenswrapper[4923]: I1128 11:10:07.615649 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:07 crc kubenswrapper[4923]: I1128 11:10:07.615678 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:07 crc kubenswrapper[4923]: I1128 11:10:07.615703 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:07 crc kubenswrapper[4923]: I1128 11:10:07.615725 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:07Z","lastTransitionTime":"2025-11-28T11:10:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:10:07 crc kubenswrapper[4923]: I1128 11:10:07.718923 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:07 crc kubenswrapper[4923]: I1128 11:10:07.719015 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:07 crc kubenswrapper[4923]: I1128 11:10:07.719032 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:07 crc kubenswrapper[4923]: I1128 11:10:07.719057 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:07 crc kubenswrapper[4923]: I1128 11:10:07.719076 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:07Z","lastTransitionTime":"2025-11-28T11:10:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:10:07 crc kubenswrapper[4923]: I1128 11:10:07.822065 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:07 crc kubenswrapper[4923]: I1128 11:10:07.822135 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:07 crc kubenswrapper[4923]: I1128 11:10:07.822152 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:07 crc kubenswrapper[4923]: I1128 11:10:07.822177 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:07 crc kubenswrapper[4923]: I1128 11:10:07.822195 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:07Z","lastTransitionTime":"2025-11-28T11:10:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:10:07 crc kubenswrapper[4923]: I1128 11:10:07.925275 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:07 crc kubenswrapper[4923]: I1128 11:10:07.925338 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:07 crc kubenswrapper[4923]: I1128 11:10:07.925364 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:07 crc kubenswrapper[4923]: I1128 11:10:07.925393 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:07 crc kubenswrapper[4923]: I1128 11:10:07.925414 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:07Z","lastTransitionTime":"2025-11-28T11:10:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:10:08 crc kubenswrapper[4923]: I1128 11:10:08.028299 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:08 crc kubenswrapper[4923]: I1128 11:10:08.028349 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:08 crc kubenswrapper[4923]: I1128 11:10:08.028403 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:08 crc kubenswrapper[4923]: I1128 11:10:08.028425 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:08 crc kubenswrapper[4923]: I1128 11:10:08.028440 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:08Z","lastTransitionTime":"2025-11-28T11:10:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:10:08 crc kubenswrapper[4923]: I1128 11:10:08.131261 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:08 crc kubenswrapper[4923]: I1128 11:10:08.131343 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:08 crc kubenswrapper[4923]: I1128 11:10:08.131376 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:08 crc kubenswrapper[4923]: I1128 11:10:08.131406 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:08 crc kubenswrapper[4923]: I1128 11:10:08.131426 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:08Z","lastTransitionTime":"2025-11-28T11:10:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:10:08 crc kubenswrapper[4923]: I1128 11:10:08.167837 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 11:10:08 crc kubenswrapper[4923]: I1128 11:10:08.167890 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 11:10:08 crc kubenswrapper[4923]: I1128 11:10:08.167912 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 11:10:08 crc kubenswrapper[4923]: E1128 11:10:08.168023 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 11:10:08 crc kubenswrapper[4923]: E1128 11:10:08.168138 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 11:10:08 crc kubenswrapper[4923]: E1128 11:10:08.168255 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 11:10:08 crc kubenswrapper[4923]: I1128 11:10:08.235213 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:08 crc kubenswrapper[4923]: I1128 11:10:08.235269 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:08 crc kubenswrapper[4923]: I1128 11:10:08.235285 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:08 crc kubenswrapper[4923]: I1128 11:10:08.235307 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:08 crc kubenswrapper[4923]: I1128 11:10:08.235323 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:08Z","lastTransitionTime":"2025-11-28T11:10:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:10:08 crc kubenswrapper[4923]: I1128 11:10:08.338263 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:08 crc kubenswrapper[4923]: I1128 11:10:08.338309 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:08 crc kubenswrapper[4923]: I1128 11:10:08.338325 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:08 crc kubenswrapper[4923]: I1128 11:10:08.338347 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:08 crc kubenswrapper[4923]: I1128 11:10:08.338363 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:08Z","lastTransitionTime":"2025-11-28T11:10:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:10:08 crc kubenswrapper[4923]: I1128 11:10:08.441311 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:08 crc kubenswrapper[4923]: I1128 11:10:08.441356 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:08 crc kubenswrapper[4923]: I1128 11:10:08.441373 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:08 crc kubenswrapper[4923]: I1128 11:10:08.441393 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:08 crc kubenswrapper[4923]: I1128 11:10:08.441408 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:08Z","lastTransitionTime":"2025-11-28T11:10:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:10:08 crc kubenswrapper[4923]: I1128 11:10:08.544133 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:08 crc kubenswrapper[4923]: I1128 11:10:08.544191 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:08 crc kubenswrapper[4923]: I1128 11:10:08.544252 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:08 crc kubenswrapper[4923]: I1128 11:10:08.544281 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:08 crc kubenswrapper[4923]: I1128 11:10:08.544305 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:08Z","lastTransitionTime":"2025-11-28T11:10:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:10:08 crc kubenswrapper[4923]: I1128 11:10:08.647603 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:08 crc kubenswrapper[4923]: I1128 11:10:08.647652 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:08 crc kubenswrapper[4923]: I1128 11:10:08.647669 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:08 crc kubenswrapper[4923]: I1128 11:10:08.647691 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:08 crc kubenswrapper[4923]: I1128 11:10:08.647708 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:08Z","lastTransitionTime":"2025-11-28T11:10:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:10:08 crc kubenswrapper[4923]: I1128 11:10:08.750472 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:08 crc kubenswrapper[4923]: I1128 11:10:08.750539 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:08 crc kubenswrapper[4923]: I1128 11:10:08.750702 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:08 crc kubenswrapper[4923]: I1128 11:10:08.750750 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:08 crc kubenswrapper[4923]: I1128 11:10:08.750768 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:08Z","lastTransitionTime":"2025-11-28T11:10:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:10:08 crc kubenswrapper[4923]: I1128 11:10:08.854014 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:08 crc kubenswrapper[4923]: I1128 11:10:08.854047 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:08 crc kubenswrapper[4923]: I1128 11:10:08.854063 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:08 crc kubenswrapper[4923]: I1128 11:10:08.854083 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:08 crc kubenswrapper[4923]: I1128 11:10:08.854099 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:08Z","lastTransitionTime":"2025-11-28T11:10:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:10:08 crc kubenswrapper[4923]: I1128 11:10:08.957515 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:08 crc kubenswrapper[4923]: I1128 11:10:08.957559 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:08 crc kubenswrapper[4923]: I1128 11:10:08.957582 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:08 crc kubenswrapper[4923]: I1128 11:10:08.957608 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:08 crc kubenswrapper[4923]: I1128 11:10:08.957627 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:08Z","lastTransitionTime":"2025-11-28T11:10:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:10:09 crc kubenswrapper[4923]: I1128 11:10:09.061516 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:09 crc kubenswrapper[4923]: I1128 11:10:09.061572 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:09 crc kubenswrapper[4923]: I1128 11:10:09.061590 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:09 crc kubenswrapper[4923]: I1128 11:10:09.061613 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:09 crc kubenswrapper[4923]: I1128 11:10:09.061631 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:09Z","lastTransitionTime":"2025-11-28T11:10:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:10:09 crc kubenswrapper[4923]: I1128 11:10:09.163994 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:09 crc kubenswrapper[4923]: I1128 11:10:09.164035 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:09 crc kubenswrapper[4923]: I1128 11:10:09.164050 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:09 crc kubenswrapper[4923]: I1128 11:10:09.164069 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:09 crc kubenswrapper[4923]: I1128 11:10:09.164085 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:09Z","lastTransitionTime":"2025-11-28T11:10:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:10:09 crc kubenswrapper[4923]: I1128 11:10:09.169186 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-g2kmb" Nov 28 11:10:09 crc kubenswrapper[4923]: E1128 11:10:09.169317 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-g2kmb" podUID="b483d037-b692-45d5-bb83-02e029649100" Nov 28 11:10:09 crc kubenswrapper[4923]: I1128 11:10:09.266635 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:09 crc kubenswrapper[4923]: I1128 11:10:09.267648 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:09 crc kubenswrapper[4923]: I1128 11:10:09.267806 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:09 crc kubenswrapper[4923]: I1128 11:10:09.268112 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:09 crc kubenswrapper[4923]: I1128 11:10:09.268277 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:09Z","lastTransitionTime":"2025-11-28T11:10:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:10:09 crc kubenswrapper[4923]: I1128 11:10:09.371323 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:09 crc kubenswrapper[4923]: I1128 11:10:09.371382 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:09 crc kubenswrapper[4923]: I1128 11:10:09.371407 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:09 crc kubenswrapper[4923]: I1128 11:10:09.371433 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:09 crc kubenswrapper[4923]: I1128 11:10:09.371453 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:09Z","lastTransitionTime":"2025-11-28T11:10:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:10:09 crc kubenswrapper[4923]: I1128 11:10:09.474896 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:09 crc kubenswrapper[4923]: I1128 11:10:09.475033 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:09 crc kubenswrapper[4923]: I1128 11:10:09.475054 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:09 crc kubenswrapper[4923]: I1128 11:10:09.475081 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:09 crc kubenswrapper[4923]: I1128 11:10:09.475102 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:09Z","lastTransitionTime":"2025-11-28T11:10:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:10:09 crc kubenswrapper[4923]: I1128 11:10:09.577995 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:09 crc kubenswrapper[4923]: I1128 11:10:09.578052 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:09 crc kubenswrapper[4923]: I1128 11:10:09.578073 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:09 crc kubenswrapper[4923]: I1128 11:10:09.578104 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:09 crc kubenswrapper[4923]: I1128 11:10:09.578127 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:09Z","lastTransitionTime":"2025-11-28T11:10:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:10:09 crc kubenswrapper[4923]: I1128 11:10:09.680304 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:09 crc kubenswrapper[4923]: I1128 11:10:09.680354 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:09 crc kubenswrapper[4923]: I1128 11:10:09.680370 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:09 crc kubenswrapper[4923]: I1128 11:10:09.680392 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:09 crc kubenswrapper[4923]: I1128 11:10:09.680406 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:09Z","lastTransitionTime":"2025-11-28T11:10:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:10:09 crc kubenswrapper[4923]: I1128 11:10:09.783013 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:09 crc kubenswrapper[4923]: I1128 11:10:09.783071 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:09 crc kubenswrapper[4923]: I1128 11:10:09.783092 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:09 crc kubenswrapper[4923]: I1128 11:10:09.783121 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:09 crc kubenswrapper[4923]: I1128 11:10:09.783146 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:09Z","lastTransitionTime":"2025-11-28T11:10:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:10:09 crc kubenswrapper[4923]: I1128 11:10:09.886263 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:09 crc kubenswrapper[4923]: I1128 11:10:09.886307 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:09 crc kubenswrapper[4923]: I1128 11:10:09.886322 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:09 crc kubenswrapper[4923]: I1128 11:10:09.886343 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:09 crc kubenswrapper[4923]: I1128 11:10:09.886361 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:09Z","lastTransitionTime":"2025-11-28T11:10:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:10:09 crc kubenswrapper[4923]: I1128 11:10:09.989524 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:09 crc kubenswrapper[4923]: I1128 11:10:09.989588 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:09 crc kubenswrapper[4923]: I1128 11:10:09.989613 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:09 crc kubenswrapper[4923]: I1128 11:10:09.989639 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:09 crc kubenswrapper[4923]: I1128 11:10:09.989659 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:09Z","lastTransitionTime":"2025-11-28T11:10:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:10:10 crc kubenswrapper[4923]: I1128 11:10:10.092382 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:10 crc kubenswrapper[4923]: I1128 11:10:10.092435 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:10 crc kubenswrapper[4923]: I1128 11:10:10.092453 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:10 crc kubenswrapper[4923]: I1128 11:10:10.092474 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:10 crc kubenswrapper[4923]: I1128 11:10:10.092490 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:10Z","lastTransitionTime":"2025-11-28T11:10:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:10:10 crc kubenswrapper[4923]: I1128 11:10:10.168293 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 11:10:10 crc kubenswrapper[4923]: I1128 11:10:10.168354 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 11:10:10 crc kubenswrapper[4923]: I1128 11:10:10.168605 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 11:10:10 crc kubenswrapper[4923]: E1128 11:10:10.168770 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 11:10:10 crc kubenswrapper[4923]: E1128 11:10:10.168860 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 11:10:10 crc kubenswrapper[4923]: E1128 11:10:10.169059 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 11:10:10 crc kubenswrapper[4923]: I1128 11:10:10.185673 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"] Nov 28 11:10:10 crc kubenswrapper[4923]: I1128 11:10:10.195317 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:10 crc kubenswrapper[4923]: I1128 11:10:10.195374 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:10 crc kubenswrapper[4923]: I1128 11:10:10.195395 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:10 crc kubenswrapper[4923]: I1128 11:10:10.195424 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:10 crc kubenswrapper[4923]: I1128 11:10:10.195445 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:10Z","lastTransitionTime":"2025-11-28T11:10:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:10:10 crc kubenswrapper[4923]: I1128 11:10:10.299189 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:10 crc kubenswrapper[4923]: I1128 11:10:10.299228 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:10 crc kubenswrapper[4923]: I1128 11:10:10.299243 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:10 crc kubenswrapper[4923]: I1128 11:10:10.299261 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:10 crc kubenswrapper[4923]: I1128 11:10:10.299317 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:10Z","lastTransitionTime":"2025-11-28T11:10:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:10:10 crc kubenswrapper[4923]: I1128 11:10:10.402518 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:10 crc kubenswrapper[4923]: I1128 11:10:10.402579 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:10 crc kubenswrapper[4923]: I1128 11:10:10.402602 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:10 crc kubenswrapper[4923]: I1128 11:10:10.402630 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:10 crc kubenswrapper[4923]: I1128 11:10:10.402652 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:10Z","lastTransitionTime":"2025-11-28T11:10:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:10:10 crc kubenswrapper[4923]: I1128 11:10:10.505441 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:10 crc kubenswrapper[4923]: I1128 11:10:10.505491 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:10 crc kubenswrapper[4923]: I1128 11:10:10.505504 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:10 crc kubenswrapper[4923]: I1128 11:10:10.505524 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:10 crc kubenswrapper[4923]: I1128 11:10:10.505540 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:10Z","lastTransitionTime":"2025-11-28T11:10:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:10:10 crc kubenswrapper[4923]: I1128 11:10:10.607762 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:10 crc kubenswrapper[4923]: I1128 11:10:10.607823 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:10 crc kubenswrapper[4923]: I1128 11:10:10.607843 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:10 crc kubenswrapper[4923]: I1128 11:10:10.607865 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:10 crc kubenswrapper[4923]: I1128 11:10:10.607883 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:10Z","lastTransitionTime":"2025-11-28T11:10:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:10:10 crc kubenswrapper[4923]: I1128 11:10:10.710716 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:10 crc kubenswrapper[4923]: I1128 11:10:10.710782 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:10 crc kubenswrapper[4923]: I1128 11:10:10.710810 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:10 crc kubenswrapper[4923]: I1128 11:10:10.710837 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:10 crc kubenswrapper[4923]: I1128 11:10:10.710857 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:10Z","lastTransitionTime":"2025-11-28T11:10:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:10:10 crc kubenswrapper[4923]: I1128 11:10:10.813914 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:10 crc kubenswrapper[4923]: I1128 11:10:10.814010 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:10 crc kubenswrapper[4923]: I1128 11:10:10.814031 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:10 crc kubenswrapper[4923]: I1128 11:10:10.814058 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:10 crc kubenswrapper[4923]: I1128 11:10:10.814075 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:10Z","lastTransitionTime":"2025-11-28T11:10:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:10:10 crc kubenswrapper[4923]: I1128 11:10:10.916599 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:10 crc kubenswrapper[4923]: I1128 11:10:10.916668 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:10 crc kubenswrapper[4923]: I1128 11:10:10.916690 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:10 crc kubenswrapper[4923]: I1128 11:10:10.916718 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:10 crc kubenswrapper[4923]: I1128 11:10:10.916737 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:10Z","lastTransitionTime":"2025-11-28T11:10:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:10:11 crc kubenswrapper[4923]: I1128 11:10:11.019869 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:11 crc kubenswrapper[4923]: I1128 11:10:11.019922 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:11 crc kubenswrapper[4923]: I1128 11:10:11.019977 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:11 crc kubenswrapper[4923]: I1128 11:10:11.020000 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:11 crc kubenswrapper[4923]: I1128 11:10:11.020019 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:11Z","lastTransitionTime":"2025-11-28T11:10:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:10:11 crc kubenswrapper[4923]: I1128 11:10:11.122829 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:11 crc kubenswrapper[4923]: I1128 11:10:11.122879 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:11 crc kubenswrapper[4923]: I1128 11:10:11.122888 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:11 crc kubenswrapper[4923]: I1128 11:10:11.122903 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:11 crc kubenswrapper[4923]: I1128 11:10:11.122913 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:11Z","lastTransitionTime":"2025-11-28T11:10:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:10:11 crc kubenswrapper[4923]: I1128 11:10:11.167866 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-g2kmb" Nov 28 11:10:11 crc kubenswrapper[4923]: E1128 11:10:11.168282 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-g2kmb" podUID="b483d037-b692-45d5-bb83-02e029649100" Nov 28 11:10:11 crc kubenswrapper[4923]: I1128 11:10:11.226153 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:11 crc kubenswrapper[4923]: I1128 11:10:11.226459 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:11 crc kubenswrapper[4923]: I1128 11:10:11.226569 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:11 crc kubenswrapper[4923]: I1128 11:10:11.226657 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:11 crc kubenswrapper[4923]: I1128 11:10:11.226734 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:11Z","lastTransitionTime":"2025-11-28T11:10:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:10:11 crc kubenswrapper[4923]: I1128 11:10:11.234439 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-766k2" podStartSLOduration=72.234419358 podStartE2EDuration="1m12.234419358s" podCreationTimestamp="2025-11-28 11:08:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:10:11.198071871 +0000 UTC m=+90.326756101" watchObservedRunningTime="2025-11-28 11:10:11.234419358 +0000 UTC m=+90.363103578" Nov 28 11:10:11 crc kubenswrapper[4923]: I1128 11:10:11.291392 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-9gjj9" podStartSLOduration=71.291373139 podStartE2EDuration="1m11.291373139s" podCreationTimestamp="2025-11-28 11:09:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:10:11.280747958 +0000 UTC m=+90.409432168" watchObservedRunningTime="2025-11-28 11:10:11.291373139 +0000 UTC m=+90.420057339" Nov 28 11:10:11 crc kubenswrapper[4923]: I1128 11:10:11.304669 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-9qvkm" podStartSLOduration=72.304342575 podStartE2EDuration="1m12.304342575s" podCreationTimestamp="2025-11-28 11:08:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:10:11.291972606 +0000 UTC m=+90.420656816" watchObservedRunningTime="2025-11-28 11:10:11.304342575 +0000 UTC m=+90.433026795" Nov 28 11:10:11 crc kubenswrapper[4923]: I1128 11:10:11.321543 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=1.321535431 podStartE2EDuration="1.321535431s" podCreationTimestamp="2025-11-28 11:10:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:10:11.321355746 +0000 UTC m=+90.450039966" watchObservedRunningTime="2025-11-28 
11:10:11.321535431 +0000 UTC m=+90.450219641" Nov 28 11:10:11 crc kubenswrapper[4923]: I1128 11:10:11.334281 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:11 crc kubenswrapper[4923]: I1128 11:10:11.334340 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:11 crc kubenswrapper[4923]: I1128 11:10:11.334380 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:11 crc kubenswrapper[4923]: I1128 11:10:11.334404 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:11 crc kubenswrapper[4923]: I1128 11:10:11.334422 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:11Z","lastTransitionTime":"2025-11-28T11:10:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 11:10:11 crc kubenswrapper[4923]: I1128 11:10:11.340531 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=44.340521538 podStartE2EDuration="44.340521538s" podCreationTimestamp="2025-11-28 11:09:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:10:11.340420245 +0000 UTC m=+90.469104465" watchObservedRunningTime="2025-11-28 11:10:11.340521538 +0000 UTC m=+90.469205738" Nov 28 11:10:11 crc kubenswrapper[4923]: I1128 11:10:11.370894 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 11:10:11 crc kubenswrapper[4923]: I1128 11:10:11.370921 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 11:10:11 crc kubenswrapper[4923]: I1128 11:10:11.370946 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 11:10:11 crc kubenswrapper[4923]: I1128 11:10:11.370958 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 11:10:11 crc kubenswrapper[4923]: I1128 11:10:11.370966 4923 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T11:10:11Z","lastTransitionTime":"2025-11-28T11:10:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 11:10:11 crc kubenswrapper[4923]: I1128 11:10:11.373228 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-h5s2m" podStartSLOduration=72.373217923 podStartE2EDuration="1m12.373217923s" podCreationTimestamp="2025-11-28 11:08:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:10:11.358423934 +0000 UTC m=+90.487108144" watchObservedRunningTime="2025-11-28 11:10:11.373217923 +0000 UTC m=+90.501902133" Nov 28 11:10:11 crc kubenswrapper[4923]: I1128 11:10:11.388088 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=71.388080383 podStartE2EDuration="1m11.388080383s" podCreationTimestamp="2025-11-28 11:09:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:10:11.373910432 +0000 UTC m=+90.502594642" watchObservedRunningTime="2025-11-28 11:10:11.388080383 +0000 UTC m=+90.516764593" Nov 28 11:10:11 crc kubenswrapper[4923]: I1128 11:10:11.434791 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-64456"] Nov 28 11:10:11 crc kubenswrapper[4923]: I1128 11:10:11.435122 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-64456" Nov 28 11:10:11 crc kubenswrapper[4923]: I1128 11:10:11.436738 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Nov 28 11:10:11 crc kubenswrapper[4923]: I1128 11:10:11.436920 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Nov 28 11:10:11 crc kubenswrapper[4923]: I1128 11:10:11.437692 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Nov 28 11:10:11 crc kubenswrapper[4923]: I1128 11:10:11.443229 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Nov 28 11:10:11 crc kubenswrapper[4923]: I1128 11:10:11.462616 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podStartSLOduration=72.46259857 podStartE2EDuration="1m12.46259857s" podCreationTimestamp="2025-11-28 11:08:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:10:11.462301832 +0000 UTC m=+90.590986032" watchObservedRunningTime="2025-11-28 11:10:11.46259857 +0000 UTC m=+90.591282800" Nov 28 11:10:11 crc kubenswrapper[4923]: I1128 11:10:11.502148 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-8klhg" podStartSLOduration=71.502132818 podStartE2EDuration="1m11.502132818s" podCreationTimestamp="2025-11-28 11:09:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:10:11.501106609 +0000 UTC m=+90.629790809" watchObservedRunningTime="2025-11-28 11:10:11.502132818 +0000 UTC m=+90.630817028" Nov 28 11:10:11 crc 
kubenswrapper[4923]: I1128 11:10:11.535134 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=71.5350975 podStartE2EDuration="1m11.5350975s" podCreationTimestamp="2025-11-28 11:09:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:10:11.534448062 +0000 UTC m=+90.663132272" watchObservedRunningTime="2025-11-28 11:10:11.5350975 +0000 UTC m=+90.663781710"
Nov 28 11:10:11 crc kubenswrapper[4923]: I1128 11:10:11.567450 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/96414b02-a75a-4030-9dde-02066567bc74-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-64456\" (UID: \"96414b02-a75a-4030-9dde-02066567bc74\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-64456"
Nov 28 11:10:11 crc kubenswrapper[4923]: I1128 11:10:11.567514 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/96414b02-a75a-4030-9dde-02066567bc74-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-64456\" (UID: \"96414b02-a75a-4030-9dde-02066567bc74\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-64456"
Nov 28 11:10:11 crc kubenswrapper[4923]: I1128 11:10:11.567577 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/96414b02-a75a-4030-9dde-02066567bc74-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-64456\" (UID: \"96414b02-a75a-4030-9dde-02066567bc74\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-64456"
Nov 28 11:10:11 crc kubenswrapper[4923]: I1128 11:10:11.567596 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/96414b02-a75a-4030-9dde-02066567bc74-service-ca\") pod \"cluster-version-operator-5c965bbfc6-64456\" (UID: \"96414b02-a75a-4030-9dde-02066567bc74\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-64456"
Nov 28 11:10:11 crc kubenswrapper[4923]: I1128 11:10:11.567632 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/96414b02-a75a-4030-9dde-02066567bc74-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-64456\" (UID: \"96414b02-a75a-4030-9dde-02066567bc74\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-64456"
Nov 28 11:10:11 crc kubenswrapper[4923]: I1128 11:10:11.669179 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/96414b02-a75a-4030-9dde-02066567bc74-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-64456\" (UID: \"96414b02-a75a-4030-9dde-02066567bc74\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-64456"
Nov 28 11:10:11 crc kubenswrapper[4923]: I1128 11:10:11.669270 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/96414b02-a75a-4030-9dde-02066567bc74-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-64456\" (UID: \"96414b02-a75a-4030-9dde-02066567bc74\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-64456"
Nov 28 11:10:11 crc kubenswrapper[4923]: I1128 11:10:11.669326 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/96414b02-a75a-4030-9dde-02066567bc74-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-64456\" (UID: \"96414b02-a75a-4030-9dde-02066567bc74\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-64456"
Nov 28 11:10:11 crc kubenswrapper[4923]: I1128 11:10:11.669373 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/96414b02-a75a-4030-9dde-02066567bc74-service-ca\") pod \"cluster-version-operator-5c965bbfc6-64456\" (UID: \"96414b02-a75a-4030-9dde-02066567bc74\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-64456"
Nov 28 11:10:11 crc kubenswrapper[4923]: I1128 11:10:11.669404 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/96414b02-a75a-4030-9dde-02066567bc74-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-64456\" (UID: \"96414b02-a75a-4030-9dde-02066567bc74\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-64456"
Nov 28 11:10:11 crc kubenswrapper[4923]: I1128 11:10:11.669412 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/96414b02-a75a-4030-9dde-02066567bc74-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-64456\" (UID: \"96414b02-a75a-4030-9dde-02066567bc74\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-64456"
Nov 28 11:10:11 crc kubenswrapper[4923]: I1128 11:10:11.669500 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/96414b02-a75a-4030-9dde-02066567bc74-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-64456\" (UID: \"96414b02-a75a-4030-9dde-02066567bc74\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-64456"
Nov 28 11:10:11 crc kubenswrapper[4923]: I1128 11:10:11.670912 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/96414b02-a75a-4030-9dde-02066567bc74-service-ca\") pod \"cluster-version-operator-5c965bbfc6-64456\" (UID: \"96414b02-a75a-4030-9dde-02066567bc74\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-64456"
Nov 28 11:10:11 crc kubenswrapper[4923]: I1128 11:10:11.679141 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/96414b02-a75a-4030-9dde-02066567bc74-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-64456\" (UID: \"96414b02-a75a-4030-9dde-02066567bc74\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-64456"
Nov 28 11:10:11 crc kubenswrapper[4923]: I1128 11:10:11.692811 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/96414b02-a75a-4030-9dde-02066567bc74-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-64456\" (UID: \"96414b02-a75a-4030-9dde-02066567bc74\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-64456"
Nov 28 11:10:11 crc kubenswrapper[4923]: I1128 11:10:11.745827 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-64456"
Nov 28 11:10:12 crc kubenswrapper[4923]: I1128 11:10:12.168565 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 11:10:12 crc kubenswrapper[4923]: E1128 11:10:12.169029 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 11:10:12 crc kubenswrapper[4923]: I1128 11:10:12.168671 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 11:10:12 crc kubenswrapper[4923]: E1128 11:10:12.169145 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 11:10:12 crc kubenswrapper[4923]: I1128 11:10:12.168636 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 11:10:12 crc kubenswrapper[4923]: E1128 11:10:12.169237 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 11:10:12 crc kubenswrapper[4923]: I1128 11:10:12.251276 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-64456" event={"ID":"96414b02-a75a-4030-9dde-02066567bc74","Type":"ContainerStarted","Data":"db5e5954935edcc78861efb990cd3b9867a8ae4e266685a514edf97148996f03"}
Nov 28 11:10:12 crc kubenswrapper[4923]: I1128 11:10:12.251342 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-64456" event={"ID":"96414b02-a75a-4030-9dde-02066567bc74","Type":"ContainerStarted","Data":"123b7c9c9c6ad443363e5fb29985fdc66b93c49e42de3eed68ce722011aaf27c"}
Nov 28 11:10:13 crc kubenswrapper[4923]: I1128 11:10:13.167725 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-g2kmb"
Nov 28 11:10:13 crc kubenswrapper[4923]: E1128 11:10:13.167982 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-g2kmb" podUID="b483d037-b692-45d5-bb83-02e029649100"
pod="openshift-multus/network-metrics-daemon-g2kmb" podUID="b483d037-b692-45d5-bb83-02e029649100" Nov 28 11:10:14 crc kubenswrapper[4923]: I1128 11:10:14.168150 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 11:10:14 crc kubenswrapper[4923]: I1128 11:10:14.168200 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 11:10:14 crc kubenswrapper[4923]: I1128 11:10:14.168163 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 11:10:14 crc kubenswrapper[4923]: E1128 11:10:14.168368 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 11:10:14 crc kubenswrapper[4923]: E1128 11:10:14.168514 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 11:10:14 crc kubenswrapper[4923]: E1128 11:10:14.168589 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 11:10:15 crc kubenswrapper[4923]: I1128 11:10:15.168261 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-g2kmb" Nov 28 11:10:15 crc kubenswrapper[4923]: E1128 11:10:15.169259 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-g2kmb" podUID="b483d037-b692-45d5-bb83-02e029649100" Nov 28 11:10:16 crc kubenswrapper[4923]: I1128 11:10:16.168222 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 11:10:16 crc kubenswrapper[4923]: I1128 11:10:16.168344 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 11:10:16 crc kubenswrapper[4923]: E1128 11:10:16.168404 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 11:10:16 crc kubenswrapper[4923]: E1128 11:10:16.168549 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 11:10:16 crc kubenswrapper[4923]: I1128 11:10:16.168258 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 11:10:16 crc kubenswrapper[4923]: E1128 11:10:16.169837 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 11:10:17 crc kubenswrapper[4923]: I1128 11:10:17.168269 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-g2kmb" Nov 28 11:10:17 crc kubenswrapper[4923]: E1128 11:10:17.168470 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-g2kmb" podUID="b483d037-b692-45d5-bb83-02e029649100" Nov 28 11:10:17 crc kubenswrapper[4923]: I1128 11:10:17.169536 4923 scope.go:117] "RemoveContainer" containerID="38c546a3fbd5195bd0602ef14f92ec2bfd832e3f46cce2b709483a90a97e1611" Nov 28 11:10:17 crc kubenswrapper[4923]: E1128 11:10:17.169870 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-68dth_openshift-ovn-kubernetes(08e03349-56fc-4b2d-93d3-cf2405a4b7ad)\"" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" podUID="08e03349-56fc-4b2d-93d3-cf2405a4b7ad" Nov 28 11:10:18 crc kubenswrapper[4923]: I1128 11:10:18.168546 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 11:10:18 crc kubenswrapper[4923]: I1128 11:10:18.168568 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 11:10:18 crc kubenswrapper[4923]: E1128 11:10:18.168726 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 11:10:18 crc kubenswrapper[4923]: E1128 11:10:18.168839 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 11:10:18 crc kubenswrapper[4923]: I1128 11:10:18.169811 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 11:10:18 crc kubenswrapper[4923]: E1128 11:10:18.170190 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 11:10:18 crc kubenswrapper[4923]: I1128 11:10:18.836293 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b483d037-b692-45d5-bb83-02e029649100-metrics-certs\") pod \"network-metrics-daemon-g2kmb\" (UID: \"b483d037-b692-45d5-bb83-02e029649100\") " pod="openshift-multus/network-metrics-daemon-g2kmb" Nov 28 11:10:18 crc kubenswrapper[4923]: E1128 11:10:18.836579 4923 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 11:10:18 crc kubenswrapper[4923]: E1128 11:10:18.836705 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b483d037-b692-45d5-bb83-02e029649100-metrics-certs podName:b483d037-b692-45d5-bb83-02e029649100 nodeName:}" failed. No retries permitted until 2025-11-28 11:11:22.836670594 +0000 UTC m=+161.965354854 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/b483d037-b692-45d5-bb83-02e029649100-metrics-certs") pod "network-metrics-daemon-g2kmb" (UID: "b483d037-b692-45d5-bb83-02e029649100") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 11:10:19 crc kubenswrapper[4923]: I1128 11:10:19.167987 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-g2kmb" Nov 28 11:10:19 crc kubenswrapper[4923]: E1128 11:10:19.168190 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-multus/network-metrics-daemon-g2kmb" podUID="b483d037-b692-45d5-bb83-02e029649100" Nov 28 11:10:20 crc kubenswrapper[4923]: I1128 11:10:20.167568 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 11:10:20 crc kubenswrapper[4923]: I1128 11:10:20.167691 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 11:10:20 crc kubenswrapper[4923]: E1128 11:10:20.167745 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 11:10:20 crc kubenswrapper[4923]: I1128 11:10:20.167595 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 11:10:20 crc kubenswrapper[4923]: E1128 11:10:20.167870 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 11:10:20 crc kubenswrapper[4923]: E1128 11:10:20.168037 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 11:10:21 crc kubenswrapper[4923]: I1128 11:10:21.168513 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-g2kmb" Nov 28 11:10:21 crc kubenswrapper[4923]: E1128 11:10:21.170388 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-g2kmb" podUID="b483d037-b692-45d5-bb83-02e029649100" Nov 28 11:10:22 crc kubenswrapper[4923]: I1128 11:10:22.167861 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 11:10:22 crc kubenswrapper[4923]: I1128 11:10:22.167894 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 11:10:22 crc kubenswrapper[4923]: I1128 11:10:22.168004 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 11:10:22 crc kubenswrapper[4923]: E1128 11:10:22.168543 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 11:10:22 crc kubenswrapper[4923]: E1128 11:10:22.168647 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 11:10:22 crc kubenswrapper[4923]: E1128 11:10:22.168677 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 11:10:22 crc kubenswrapper[4923]: I1128 11:10:22.187123 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-64456" podStartSLOduration=83.187095258 podStartE2EDuration="1m23.187095258s" podCreationTimestamp="2025-11-28 11:08:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:10:12.276980477 +0000 UTC m=+91.405664717" watchObservedRunningTime="2025-11-28 11:10:22.187095258 +0000 UTC m=+101.315779548" Nov 28 11:10:22 crc kubenswrapper[4923]: I1128 11:10:22.188805 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd/etcd-crc"] Nov 28 11:10:23 crc kubenswrapper[4923]: I1128 11:10:23.168352 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-g2kmb" Nov 28 11:10:23 crc kubenswrapper[4923]: E1128 11:10:23.168518 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-g2kmb" podUID="b483d037-b692-45d5-bb83-02e029649100" Nov 28 11:10:24 crc kubenswrapper[4923]: I1128 11:10:24.168451 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 11:10:24 crc kubenswrapper[4923]: I1128 11:10:24.168488 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 11:10:24 crc kubenswrapper[4923]: I1128 11:10:24.168518 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 11:10:24 crc kubenswrapper[4923]: E1128 11:10:24.168637 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 11:10:24 crc kubenswrapper[4923]: E1128 11:10:24.168740 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 11:10:24 crc kubenswrapper[4923]: E1128 11:10:24.168911 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 11:10:25 crc kubenswrapper[4923]: I1128 11:10:25.168251 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-g2kmb" Nov 28 11:10:25 crc kubenswrapper[4923]: E1128 11:10:25.168417 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-g2kmb" podUID="b483d037-b692-45d5-bb83-02e029649100" Nov 28 11:10:26 crc kubenswrapper[4923]: I1128 11:10:26.168465 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 11:10:26 crc kubenswrapper[4923]: I1128 11:10:26.168543 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 11:10:26 crc kubenswrapper[4923]: I1128 11:10:26.168686 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 11:10:26 crc kubenswrapper[4923]: E1128 11:10:26.169481 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 11:10:26 crc kubenswrapper[4923]: E1128 11:10:26.169252 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 11:10:26 crc kubenswrapper[4923]: E1128 11:10:26.169651 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 11:10:27 crc kubenswrapper[4923]: I1128 11:10:27.168522 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-g2kmb" Nov 28 11:10:27 crc kubenswrapper[4923]: E1128 11:10:27.169060 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-g2kmb" podUID="b483d037-b692-45d5-bb83-02e029649100" Nov 28 11:10:28 crc kubenswrapper[4923]: I1128 11:10:28.168178 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 11:10:28 crc kubenswrapper[4923]: I1128 11:10:28.168203 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 11:10:28 crc kubenswrapper[4923]: I1128 11:10:28.168311 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 11:10:28 crc kubenswrapper[4923]: E1128 11:10:28.168519 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 11:10:28 crc kubenswrapper[4923]: E1128 11:10:28.169411 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 11:10:28 crc kubenswrapper[4923]: E1128 11:10:28.169485 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 11:10:29 crc kubenswrapper[4923]: I1128 11:10:29.168466 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-g2kmb" Nov 28 11:10:29 crc kubenswrapper[4923]: E1128 11:10:29.169120 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-g2kmb" podUID="b483d037-b692-45d5-bb83-02e029649100" Nov 28 11:10:29 crc kubenswrapper[4923]: I1128 11:10:29.169535 4923 scope.go:117] "RemoveContainer" containerID="38c546a3fbd5195bd0602ef14f92ec2bfd832e3f46cce2b709483a90a97e1611" Nov 28 11:10:29 crc kubenswrapper[4923]: E1128 11:10:29.169780 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-68dth_openshift-ovn-kubernetes(08e03349-56fc-4b2d-93d3-cf2405a4b7ad)\"" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" podUID="08e03349-56fc-4b2d-93d3-cf2405a4b7ad" Nov 28 11:10:30 crc kubenswrapper[4923]: I1128 11:10:30.167594 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 11:10:30 crc kubenswrapper[4923]: I1128 11:10:30.167672 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 11:10:30 crc kubenswrapper[4923]: I1128 11:10:30.167757 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 11:10:30 crc kubenswrapper[4923]: E1128 11:10:30.167918 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 11:10:30 crc kubenswrapper[4923]: E1128 11:10:30.168078 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 11:10:30 crc kubenswrapper[4923]: E1128 11:10:30.168193 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 11:10:31 crc kubenswrapper[4923]: I1128 11:10:31.168275 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-g2kmb" Nov 28 11:10:31 crc kubenswrapper[4923]: E1128 11:10:31.172280 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-g2kmb" podUID="b483d037-b692-45d5-bb83-02e029649100" Nov 28 11:10:31 crc kubenswrapper[4923]: I1128 11:10:31.217868 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=9.217843866 podStartE2EDuration="9.217843866s" podCreationTimestamp="2025-11-28 11:10:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:10:31.217411973 +0000 UTC m=+110.346096233" watchObservedRunningTime="2025-11-28 11:10:31.217843866 +0000 UTC m=+110.346528106" Nov 28 11:10:32 crc kubenswrapper[4923]: I1128 11:10:32.167901 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 11:10:32 crc kubenswrapper[4923]: I1128 11:10:32.167973 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 11:10:32 crc kubenswrapper[4923]: I1128 11:10:32.167976 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 11:10:32 crc kubenswrapper[4923]: E1128 11:10:32.168113 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 11:10:32 crc kubenswrapper[4923]: E1128 11:10:32.168253 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 11:10:32 crc kubenswrapper[4923]: E1128 11:10:32.168384 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 11:10:33 crc kubenswrapper[4923]: I1128 11:10:33.168547 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-g2kmb" Nov 28 11:10:33 crc kubenswrapper[4923]: E1128 11:10:33.168802 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-g2kmb" podUID="b483d037-b692-45d5-bb83-02e029649100" Nov 28 11:10:33 crc kubenswrapper[4923]: I1128 11:10:33.330688 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-h5s2m_84374038-67ce-4dc0-a2c2-6eed9650c604/kube-multus/1.log" Nov 28 11:10:33 crc kubenswrapper[4923]: I1128 11:10:33.331690 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-h5s2m_84374038-67ce-4dc0-a2c2-6eed9650c604/kube-multus/0.log" Nov 28 11:10:33 crc kubenswrapper[4923]: I1128 11:10:33.331957 4923 generic.go:334] "Generic (PLEG): container finished" podID="84374038-67ce-4dc0-a2c2-6eed9650c604" containerID="53821c93696c6770adcfbe02308f05bdb9635578bd1dfa8d3201ecf94fa8b37c" exitCode=1 Nov 28 11:10:33 crc kubenswrapper[4923]: I1128 11:10:33.332108 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-h5s2m" event={"ID":"84374038-67ce-4dc0-a2c2-6eed9650c604","Type":"ContainerDied","Data":"53821c93696c6770adcfbe02308f05bdb9635578bd1dfa8d3201ecf94fa8b37c"} Nov 28 11:10:33 crc kubenswrapper[4923]: I1128 11:10:33.332219 4923 scope.go:117] "RemoveContainer" containerID="addcc8dd720a66b5089f7fa541a454de2be862cc524d1f8e4c948059ef70e20f" Nov 28 11:10:33 crc kubenswrapper[4923]: I1128 11:10:33.337034 4923 scope.go:117] "RemoveContainer" containerID="53821c93696c6770adcfbe02308f05bdb9635578bd1dfa8d3201ecf94fa8b37c" Nov 28 11:10:33 crc kubenswrapper[4923]: E1128 11:10:33.341251 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-multus pod=multus-h5s2m_openshift-multus(84374038-67ce-4dc0-a2c2-6eed9650c604)\"" pod="openshift-multus/multus-h5s2m" podUID="84374038-67ce-4dc0-a2c2-6eed9650c604" Nov 28 11:10:34 crc kubenswrapper[4923]: I1128 11:10:34.168062 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 11:10:34 crc kubenswrapper[4923]: I1128 11:10:34.168070 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 11:10:34 crc kubenswrapper[4923]: E1128 11:10:34.168220 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 11:10:34 crc kubenswrapper[4923]: E1128 11:10:34.168375 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 11:10:34 crc kubenswrapper[4923]: I1128 11:10:34.168085 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 11:10:34 crc kubenswrapper[4923]: E1128 11:10:34.169694 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 11:10:34 crc kubenswrapper[4923]: I1128 11:10:34.337886 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-h5s2m_84374038-67ce-4dc0-a2c2-6eed9650c604/kube-multus/1.log" Nov 28 11:10:35 crc kubenswrapper[4923]: I1128 11:10:35.168728 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-g2kmb" Nov 28 11:10:35 crc kubenswrapper[4923]: E1128 11:10:35.168990 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-g2kmb" podUID="b483d037-b692-45d5-bb83-02e029649100" Nov 28 11:10:36 crc kubenswrapper[4923]: I1128 11:10:36.167779 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 11:10:36 crc kubenswrapper[4923]: I1128 11:10:36.167835 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 11:10:36 crc kubenswrapper[4923]: I1128 11:10:36.167779 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 11:10:36 crc kubenswrapper[4923]: E1128 11:10:36.167985 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 11:10:36 crc kubenswrapper[4923]: E1128 11:10:36.168113 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 11:10:36 crc kubenswrapper[4923]: E1128 11:10:36.168267 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 11:10:37 crc kubenswrapper[4923]: I1128 11:10:37.168009 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-g2kmb" Nov 28 11:10:37 crc kubenswrapper[4923]: E1128 11:10:37.168184 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-g2kmb" podUID="b483d037-b692-45d5-bb83-02e029649100" Nov 28 11:10:38 crc kubenswrapper[4923]: I1128 11:10:38.168546 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 11:10:38 crc kubenswrapper[4923]: E1128 11:10:38.168734 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 11:10:38 crc kubenswrapper[4923]: I1128 11:10:38.169054 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 11:10:38 crc kubenswrapper[4923]: E1128 11:10:38.169146 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 11:10:38 crc kubenswrapper[4923]: I1128 11:10:38.169266 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 11:10:38 crc kubenswrapper[4923]: E1128 11:10:38.169480 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 11:10:39 crc kubenswrapper[4923]: I1128 11:10:39.168330 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-g2kmb" Nov 28 11:10:39 crc kubenswrapper[4923]: E1128 11:10:39.168472 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-g2kmb" podUID="b483d037-b692-45d5-bb83-02e029649100" Nov 28 11:10:40 crc kubenswrapper[4923]: I1128 11:10:40.168522 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 11:10:40 crc kubenswrapper[4923]: I1128 11:10:40.168619 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 11:10:40 crc kubenswrapper[4923]: E1128 11:10:40.168724 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 11:10:40 crc kubenswrapper[4923]: I1128 11:10:40.168643 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 11:10:40 crc kubenswrapper[4923]: E1128 11:10:40.168998 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 11:10:40 crc kubenswrapper[4923]: E1128 11:10:40.168825 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 11:10:41 crc kubenswrapper[4923]: E1128 11:10:41.129643 4923 kubelet_node_status.go:497] "Node not becoming ready in time after startup" Nov 28 11:10:41 crc kubenswrapper[4923]: I1128 11:10:41.168588 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-g2kmb" Nov 28 11:10:41 crc kubenswrapper[4923]: E1128 11:10:41.170576 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-g2kmb" podUID="b483d037-b692-45d5-bb83-02e029649100" Nov 28 11:10:41 crc kubenswrapper[4923]: E1128 11:10:41.267615 4923 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 28 11:10:42 crc kubenswrapper[4923]: I1128 11:10:42.167693 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 11:10:42 crc kubenswrapper[4923]: I1128 11:10:42.167712 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 11:10:42 crc kubenswrapper[4923]: E1128 11:10:42.167837 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 11:10:42 crc kubenswrapper[4923]: E1128 11:10:42.167902 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 11:10:42 crc kubenswrapper[4923]: I1128 11:10:42.168176 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 11:10:42 crc kubenswrapper[4923]: E1128 11:10:42.168260 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 11:10:43 crc kubenswrapper[4923]: I1128 11:10:43.168091 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-g2kmb" Nov 28 11:10:43 crc kubenswrapper[4923]: E1128 11:10:43.168332 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-g2kmb" podUID="b483d037-b692-45d5-bb83-02e029649100" Nov 28 11:10:43 crc kubenswrapper[4923]: I1128 11:10:43.169589 4923 scope.go:117] "RemoveContainer" containerID="38c546a3fbd5195bd0602ef14f92ec2bfd832e3f46cce2b709483a90a97e1611" Nov 28 11:10:43 crc kubenswrapper[4923]: I1128 11:10:43.378346 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-68dth_08e03349-56fc-4b2d-93d3-cf2405a4b7ad/ovnkube-controller/3.log" Nov 28 11:10:43 crc kubenswrapper[4923]: I1128 11:10:43.383756 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" event={"ID":"08e03349-56fc-4b2d-93d3-cf2405a4b7ad","Type":"ContainerStarted","Data":"4173e729eed9162f6bf7b08d2ce1e2432fe973fa48a87354e9ae9d0057caf297"} Nov 28 11:10:43 crc kubenswrapper[4923]: I1128 11:10:43.384516 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" Nov 28 11:10:43 crc kubenswrapper[4923]: I1128 11:10:43.427185 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" podStartSLOduration=103.427169667 podStartE2EDuration="1m43.427169667s" podCreationTimestamp="2025-11-28 11:09:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:10:43.427160586 +0000 UTC m=+122.555844836" watchObservedRunningTime="2025-11-28 11:10:43.427169667 +0000 UTC m=+122.555853877" Nov 28 11:10:44 crc kubenswrapper[4923]: I1128 11:10:44.093194 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-g2kmb"] Nov 28 11:10:44 crc kubenswrapper[4923]: I1128 11:10:44.093316 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-g2kmb" Nov 28 11:10:44 crc kubenswrapper[4923]: E1128 11:10:44.093443 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-g2kmb" podUID="b483d037-b692-45d5-bb83-02e029649100" Nov 28 11:10:44 crc kubenswrapper[4923]: I1128 11:10:44.167959 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 11:10:44 crc kubenswrapper[4923]: I1128 11:10:44.168043 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 11:10:44 crc kubenswrapper[4923]: E1128 11:10:44.168118 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 11:10:44 crc kubenswrapper[4923]: I1128 11:10:44.168145 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 11:10:44 crc kubenswrapper[4923]: E1128 11:10:44.168295 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 11:10:44 crc kubenswrapper[4923]: E1128 11:10:44.168389 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 11:10:46 crc kubenswrapper[4923]: I1128 11:10:46.168173 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 11:10:46 crc kubenswrapper[4923]: I1128 11:10:46.168197 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 11:10:46 crc kubenswrapper[4923]: E1128 11:10:46.169215 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 11:10:46 crc kubenswrapper[4923]: I1128 11:10:46.168295 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-g2kmb" Nov 28 11:10:46 crc kubenswrapper[4923]: I1128 11:10:46.168295 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 11:10:46 crc kubenswrapper[4923]: E1128 11:10:46.169383 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 11:10:46 crc kubenswrapper[4923]: E1128 11:10:46.169464 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 11:10:46 crc kubenswrapper[4923]: E1128 11:10:46.169592 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-g2kmb" podUID="b483d037-b692-45d5-bb83-02e029649100" Nov 28 11:10:46 crc kubenswrapper[4923]: E1128 11:10:46.269514 4923 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 28 11:10:47 crc kubenswrapper[4923]: I1128 11:10:47.168714 4923 scope.go:117] "RemoveContainer" containerID="53821c93696c6770adcfbe02308f05bdb9635578bd1dfa8d3201ecf94fa8b37c" Nov 28 11:10:48 crc kubenswrapper[4923]: I1128 11:10:48.168147 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 11:10:48 crc kubenswrapper[4923]: I1128 11:10:48.168197 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-g2kmb" Nov 28 11:10:48 crc kubenswrapper[4923]: I1128 11:10:48.168151 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 11:10:48 crc kubenswrapper[4923]: E1128 11:10:48.168333 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 11:10:48 crc kubenswrapper[4923]: E1128 11:10:48.168702 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-g2kmb" podUID="b483d037-b692-45d5-bb83-02e029649100" Nov 28 11:10:48 crc kubenswrapper[4923]: E1128 11:10:48.168750 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 11:10:48 crc kubenswrapper[4923]: I1128 11:10:48.169178 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 11:10:48 crc kubenswrapper[4923]: E1128 11:10:48.169369 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 11:10:48 crc kubenswrapper[4923]: I1128 11:10:48.411050 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-h5s2m_84374038-67ce-4dc0-a2c2-6eed9650c604/kube-multus/1.log" Nov 28 11:10:48 crc kubenswrapper[4923]: I1128 11:10:48.411133 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-h5s2m" event={"ID":"84374038-67ce-4dc0-a2c2-6eed9650c604","Type":"ContainerStarted","Data":"4e5d464fbc192436a17d1b829b59f434eeda1bcd59ca123e60356e99ed41be9a"} Nov 28 11:10:50 crc kubenswrapper[4923]: I1128 11:10:50.167726 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 11:10:50 crc kubenswrapper[4923]: E1128 11:10:50.167970 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 11:10:50 crc kubenswrapper[4923]: I1128 11:10:50.168045 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 11:10:50 crc kubenswrapper[4923]: E1128 11:10:50.168128 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 11:10:50 crc kubenswrapper[4923]: I1128 11:10:50.168471 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 11:10:50 crc kubenswrapper[4923]: E1128 11:10:50.168553 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 11:10:50 crc kubenswrapper[4923]: I1128 11:10:50.168725 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-g2kmb" Nov 28 11:10:50 crc kubenswrapper[4923]: E1128 11:10:50.168821 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-g2kmb" podUID="b483d037-b692-45d5-bb83-02e029649100" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.168635 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.168687 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.168745 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-g2kmb" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.168817 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.172545 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.172635 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.172887 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.173064 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.173252 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.173324 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.458670 4923 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeReady" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.520116 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-7l2lz"] Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.520635 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-7l2lz" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.535979 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.537006 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.537186 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.545092 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-lhbv8"] Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.545796 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-lhbv8" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.553274 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-m4snv"] Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.554100 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-m4snv" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.557038 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-pbbcs"] Nov 28 11:10:52 crc kubenswrapper[4923]: W1128 11:10:52.557172 4923 reflector.go:561] object-"openshift-apiserver"/"etcd-client": failed to list *v1.Secret: secrets "etcd-client" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-apiserver": no relationship found between node 'crc' and this object Nov 28 11:10:52 crc kubenswrapper[4923]: E1128 11:10:52.557221 4923 reflector.go:158] "Unhandled Error" err="object-\"openshift-apiserver\"/\"etcd-client\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"etcd-client\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-apiserver\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.557647 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pbbcs" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.563356 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-w6d2b"] Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.564215 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-w6d2b" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.565161 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-28cv6"] Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.596008 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/37ddbfb0-c042-460d-b772-9cdd214a79a1-etcd-serving-ca\") pod \"apiserver-76f77b778f-lhbv8\" (UID: \"37ddbfb0-c042-460d-b772-9cdd214a79a1\") " pod="openshift-apiserver/apiserver-76f77b778f-lhbv8" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.596044 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/c708f3f9-1c78-43ee-8630-add159a49c49-audit-dir\") pod \"apiserver-7bbb656c7d-pbbcs\" (UID: \"c708f3f9-1c78-43ee-8630-add159a49c49\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pbbcs" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.596064 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/c708f3f9-1c78-43ee-8630-add159a49c49-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-pbbcs\" (UID: \"c708f3f9-1c78-43ee-8630-add159a49c49\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pbbcs" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.596080 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c708f3f9-1c78-43ee-8630-add159a49c49-serving-cert\") pod \"apiserver-7bbb656c7d-pbbcs\" (UID: \"c708f3f9-1c78-43ee-8630-add159a49c49\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pbbcs" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.596098 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bbhbm\" (UniqueName: \"kubernetes.io/projected/7a3aa8b5-2de7-476e-bc85-dbbe5281e8a8-kube-api-access-bbhbm\") pod \"controller-manager-879f6c89f-7l2lz\" (UID: \"7a3aa8b5-2de7-476e-bc85-dbbe5281e8a8\") " pod="openshift-controller-manager/controller-manager-879f6c89f-7l2lz" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.596115 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7a3aa8b5-2de7-476e-bc85-dbbe5281e8a8-serving-cert\") pod \"controller-manager-879f6c89f-7l2lz\" (UID: \"7a3aa8b5-2de7-476e-bc85-dbbe5281e8a8\") " pod="openshift-controller-manager/controller-manager-879f6c89f-7l2lz" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.596132 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c708f3f9-1c78-43ee-8630-add159a49c49-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-pbbcs\" (UID: \"c708f3f9-1c78-43ee-8630-add159a49c49\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pbbcs" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.596146 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/c708f3f9-1c78-43ee-8630-add159a49c49-audit-policies\") pod 
\"apiserver-7bbb656c7d-pbbcs\" (UID: \"c708f3f9-1c78-43ee-8630-add159a49c49\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pbbcs" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.596159 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/c708f3f9-1c78-43ee-8630-add159a49c49-encryption-config\") pod \"apiserver-7bbb656c7d-pbbcs\" (UID: \"c708f3f9-1c78-43ee-8630-add159a49c49\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pbbcs" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.596173 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/37ddbfb0-c042-460d-b772-9cdd214a79a1-audit-dir\") pod \"apiserver-76f77b778f-lhbv8\" (UID: \"37ddbfb0-c042-460d-b772-9cdd214a79a1\") " pod="openshift-apiserver/apiserver-76f77b778f-lhbv8" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.596189 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/37ddbfb0-c042-460d-b772-9cdd214a79a1-audit\") pod \"apiserver-76f77b778f-lhbv8\" (UID: \"37ddbfb0-c042-460d-b772-9cdd214a79a1\") " pod="openshift-apiserver/apiserver-76f77b778f-lhbv8" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.596203 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7a3aa8b5-2de7-476e-bc85-dbbe5281e8a8-client-ca\") pod \"controller-manager-879f6c89f-7l2lz\" (UID: \"7a3aa8b5-2de7-476e-bc85-dbbe5281e8a8\") " pod="openshift-controller-manager/controller-manager-879f6c89f-7l2lz" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.596217 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7a3aa8b5-2de7-476e-bc85-dbbe5281e8a8-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-7l2lz\" (UID: \"7a3aa8b5-2de7-476e-bc85-dbbe5281e8a8\") " pod="openshift-controller-manager/controller-manager-879f6c89f-7l2lz" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.596230 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c3641d9d-5e9b-40a7-90ec-4fa7b3f42a4b-config\") pod \"machine-api-operator-5694c8668f-m4snv\" (UID: \"c3641d9d-5e9b-40a7-90ec-4fa7b3f42a4b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-m4snv" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.596253 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/c3641d9d-5e9b-40a7-90ec-4fa7b3f42a4b-images\") pod \"machine-api-operator-5694c8668f-m4snv\" (UID: \"c3641d9d-5e9b-40a7-90ec-4fa7b3f42a4b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-m4snv" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.596269 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/37ddbfb0-c042-460d-b772-9cdd214a79a1-etcd-client\") pod \"apiserver-76f77b778f-lhbv8\" (UID: \"37ddbfb0-c042-460d-b772-9cdd214a79a1\") " pod="openshift-apiserver/apiserver-76f77b778f-lhbv8" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.596299 4923 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6bdxq\" (UniqueName: \"kubernetes.io/projected/c3641d9d-5e9b-40a7-90ec-4fa7b3f42a4b-kube-api-access-6bdxq\") pod \"machine-api-operator-5694c8668f-m4snv\" (UID: \"c3641d9d-5e9b-40a7-90ec-4fa7b3f42a4b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-m4snv" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.596318 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s4l8c\" (UniqueName: \"kubernetes.io/projected/c708f3f9-1c78-43ee-8630-add159a49c49-kube-api-access-s4l8c\") pod \"apiserver-7bbb656c7d-pbbcs\" (UID: \"c708f3f9-1c78-43ee-8630-add159a49c49\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pbbcs" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.596332 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/c3641d9d-5e9b-40a7-90ec-4fa7b3f42a4b-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-m4snv\" (UID: \"c3641d9d-5e9b-40a7-90ec-4fa7b3f42a4b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-m4snv" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.596352 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/37ddbfb0-c042-460d-b772-9cdd214a79a1-serving-cert\") pod \"apiserver-76f77b778f-lhbv8\" (UID: \"37ddbfb0-c042-460d-b772-9cdd214a79a1\") " pod="openshift-apiserver/apiserver-76f77b778f-lhbv8" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.596374 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/37ddbfb0-c042-460d-b772-9cdd214a79a1-image-import-ca\") pod \"apiserver-76f77b778f-lhbv8\" (UID: \"37ddbfb0-c042-460d-b772-9cdd214a79a1\") " pod="openshift-apiserver/apiserver-76f77b778f-lhbv8" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.596394 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/37ddbfb0-c042-460d-b772-9cdd214a79a1-config\") pod \"apiserver-76f77b778f-lhbv8\" (UID: \"37ddbfb0-c042-460d-b772-9cdd214a79a1\") " pod="openshift-apiserver/apiserver-76f77b778f-lhbv8" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.596408 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/37ddbfb0-c042-460d-b772-9cdd214a79a1-trusted-ca-bundle\") pod \"apiserver-76f77b778f-lhbv8\" (UID: \"37ddbfb0-c042-460d-b772-9cdd214a79a1\") " pod="openshift-apiserver/apiserver-76f77b778f-lhbv8" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.596422 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/37ddbfb0-c042-460d-b772-9cdd214a79a1-node-pullsecrets\") pod \"apiserver-76f77b778f-lhbv8\" (UID: \"37ddbfb0-c042-460d-b772-9cdd214a79a1\") " pod="openshift-apiserver/apiserver-76f77b778f-lhbv8" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.596435 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/7a3aa8b5-2de7-476e-bc85-dbbe5281e8a8-config\") pod \"controller-manager-879f6c89f-7l2lz\" (UID: \"7a3aa8b5-2de7-476e-bc85-dbbe5281e8a8\") " pod="openshift-controller-manager/controller-manager-879f6c89f-7l2lz" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.596450 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/c708f3f9-1c78-43ee-8630-add159a49c49-etcd-client\") pod \"apiserver-7bbb656c7d-pbbcs\" (UID: \"c708f3f9-1c78-43ee-8630-add159a49c49\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pbbcs" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.596474 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/37ddbfb0-c042-460d-b772-9cdd214a79a1-encryption-config\") pod \"apiserver-76f77b778f-lhbv8\" (UID: \"37ddbfb0-c042-460d-b772-9cdd214a79a1\") " pod="openshift-apiserver/apiserver-76f77b778f-lhbv8" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.596487 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8k6g4\" (UniqueName: \"kubernetes.io/projected/37ddbfb0-c042-460d-b772-9cdd214a79a1-kube-api-access-8k6g4\") pod \"apiserver-76f77b778f-lhbv8\" (UID: \"37ddbfb0-c042-460d-b772-9cdd214a79a1\") " pod="openshift-apiserver/apiserver-76f77b778f-lhbv8" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.596914 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.597083 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.597183 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.597277 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.597443 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.597522 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.597594 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.597653 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.597873 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 28 11:10:52 crc kubenswrapper[4923]: W1128 11:10:52.598035 4923 reflector.go:561] object-"openshift-apiserver"/"etcd-serving-ca": failed to list *v1.ConfigMap: configmaps "etcd-serving-ca" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-apiserver": no relationship found between node 'crc' and this object Nov 28 11:10:52 
crc kubenswrapper[4923]: E1128 11:10:52.598126 4923 reflector.go:158] "Unhandled Error" err="object-\"openshift-apiserver\"/\"etcd-serving-ca\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"etcd-serving-ca\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-apiserver\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 28 11:10:52 crc kubenswrapper[4923]: W1128 11:10:52.598078 4923 reflector.go:561] object-"openshift-apiserver"/"serving-cert": failed to list *v1.Secret: secrets "serving-cert" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-apiserver": no relationship found between node 'crc' and this object Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.598242 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-28cv6" Nov 28 11:10:52 crc kubenswrapper[4923]: E1128 11:10:52.598252 4923 reflector.go:158] "Unhandled Error" err="object-\"openshift-apiserver\"/\"serving-cert\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"serving-cert\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-apiserver\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.597630 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.599010 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Nov 28 11:10:52 crc kubenswrapper[4923]: W1128 11:10:52.599253 4923 reflector.go:561] object-"openshift-apiserver"/"image-import-ca": failed to list *v1.ConfigMap: configmaps "image-import-ca" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-apiserver": no relationship found between node 'crc' and this object Nov 28 11:10:52 crc kubenswrapper[4923]: E1128 11:10:52.599327 4923 reflector.go:158] "Unhandled Error" err="object-\"openshift-apiserver\"/\"image-import-ca\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"image-import-ca\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-apiserver\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 28 11:10:52 crc kubenswrapper[4923]: W1128 11:10:52.598378 4923 reflector.go:561] object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff": failed to list *v1.Secret: secrets "openshift-apiserver-sa-dockercfg-djjff" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-apiserver": no relationship found between node 'crc' and this object Nov 28 11:10:52 crc kubenswrapper[4923]: E1128 11:10:52.599431 4923 reflector.go:158] "Unhandled Error" err="object-\"openshift-apiserver\"/\"openshift-apiserver-sa-dockercfg-djjff\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"openshift-apiserver-sa-dockercfg-djjff\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-apiserver\": no 
relationship found between node 'crc' and this object" logger="UnhandledError" Nov 28 11:10:52 crc kubenswrapper[4923]: W1128 11:10:52.598396 4923 reflector.go:561] object-"openshift-apiserver"/"encryption-config-1": failed to list *v1.Secret: secrets "encryption-config-1" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-apiserver": no relationship found between node 'crc' and this object Nov 28 11:10:52 crc kubenswrapper[4923]: E1128 11:10:52.599454 4923 reflector.go:158] "Unhandled Error" err="object-\"openshift-apiserver\"/\"encryption-config-1\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"encryption-config-1\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-apiserver\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.598428 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Nov 28 11:10:52 crc kubenswrapper[4923]: W1128 11:10:52.598429 4923 reflector.go:561] object-"openshift-apiserver"/"kube-root-ca.crt": failed to list *v1.ConfigMap: configmaps "kube-root-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-apiserver": no relationship found between node 'crc' and this object Nov 28 11:10:52 crc kubenswrapper[4923]: E1128 11:10:52.599519 4923 reflector.go:158] "Unhandled Error" err="object-\"openshift-apiserver\"/\"kube-root-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"kube-root-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-apiserver\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 28 11:10:52 crc kubenswrapper[4923]: W1128 11:10:52.598459 4923 reflector.go:561] object-"openshift-apiserver"/"config": failed to list *v1.ConfigMap: configmaps "config" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-apiserver": no relationship found between node 'crc' and this object Nov 28 11:10:52 crc kubenswrapper[4923]: E1128 11:10:52.599535 4923 reflector.go:158] "Unhandled Error" err="object-\"openshift-apiserver\"/\"config\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"config\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-apiserver\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.598465 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.598474 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Nov 28 11:10:52 crc kubenswrapper[4923]: W1128 11:10:52.598490 4923 reflector.go:561] object-"openshift-apiserver"/"trusted-ca-bundle": failed to list *v1.ConfigMap: configmaps "trusted-ca-bundle" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-apiserver": no relationship found between node 'crc' and this object Nov 28 11:10:52 crc 
kubenswrapper[4923]: E1128 11:10:52.599657 4923 reflector.go:158] "Unhandled Error" err="object-\"openshift-apiserver\"/\"trusted-ca-bundle\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"trusted-ca-bundle\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-apiserver\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 28 11:10:52 crc kubenswrapper[4923]: W1128 11:10:52.598516 4923 reflector.go:561] object-"openshift-apiserver"/"audit-1": failed to list *v1.ConfigMap: configmaps "audit-1" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-apiserver": no relationship found between node 'crc' and this object Nov 28 11:10:52 crc kubenswrapper[4923]: E1128 11:10:52.599674 4923 reflector.go:158] "Unhandled Error" err="object-\"openshift-apiserver\"/\"audit-1\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"audit-1\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-apiserver\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 28 11:10:52 crc kubenswrapper[4923]: W1128 11:10:52.598538 4923 reflector.go:561] object-"openshift-apiserver"/"openshift-service-ca.crt": failed to list *v1.ConfigMap: configmaps "openshift-service-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-apiserver": no relationship found between node 'crc' and this object Nov 28 11:10:52 crc kubenswrapper[4923]: E1128 11:10:52.599689 4923 reflector.go:158] "Unhandled Error" err="object-\"openshift-apiserver\"/\"openshift-service-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"openshift-service-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-apiserver\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.598816 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.598868 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.598905 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.599844 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-j6dnf"] Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.600093 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.600239 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.600320 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-cbtlt"] Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.600357 4923 util.go:30] "No sandbox 
for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-j6dnf" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.600498 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.600572 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-cbtlt" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.600823 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.597605 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.600911 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.601359 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.601996 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-2vsdg"] Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.602285 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-2vsdg" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.602854 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-mqbzf"] Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.603494 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-mqbzf" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.614547 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-p7flx"] Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.615010 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-p7flx" Nov 28 11:10:52 crc kubenswrapper[4923]: W1128 11:10:52.649430 4923 reflector.go:561] object-"openshift-config-operator"/"openshift-service-ca.crt": failed to list *v1.ConfigMap: configmaps "openshift-service-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-config-operator": no relationship found between node 'crc' and this object Nov 28 11:10:52 crc kubenswrapper[4923]: W1128 11:10:52.649666 4923 reflector.go:561] object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w": failed to list *v1.Secret: secrets "cluster-samples-operator-dockercfg-xpp9w" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-cluster-samples-operator": no relationship found between node 'crc' and this object Nov 28 11:10:52 crc kubenswrapper[4923]: E1128 11:10:52.649709 4923 reflector.go:158] "Unhandled Error" err="object-\"openshift-cluster-samples-operator\"/\"cluster-samples-operator-dockercfg-xpp9w\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"cluster-samples-operator-dockercfg-xpp9w\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-cluster-samples-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.649749 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-tz5lm"] Nov 28 11:10:52 crc kubenswrapper[4923]: W1128 11:10:52.649761 4923 reflector.go:561] object-"openshift-authentication-operator"/"openshift-service-ca.crt": failed to list *v1.ConfigMap: configmaps "openshift-service-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-authentication-operator": no relationship found between node 'crc' and this object Nov 28 11:10:52 crc kubenswrapper[4923]: E1128 11:10:52.649772 4923 reflector.go:158] "Unhandled Error" err="object-\"openshift-authentication-operator\"/\"openshift-service-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"openshift-service-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-authentication-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 28 11:10:52 crc kubenswrapper[4923]: W1128 11:10:52.649770 4923 reflector.go:561] object-"openshift-authentication-operator"/"service-ca-bundle": failed to list *v1.ConfigMap: configmaps "service-ca-bundle" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-authentication-operator": no relationship found between node 'crc' and this object Nov 28 11:10:52 crc kubenswrapper[4923]: W1128 11:10:52.649820 4923 reflector.go:561] object-"openshift-config-operator"/"config-operator-serving-cert": failed to list *v1.Secret: secrets "config-operator-serving-cert" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-config-operator": no relationship found between node 'crc' and this object Nov 28 11:10:52 crc kubenswrapper[4923]: E1128 11:10:52.649834 4923 reflector.go:158] "Unhandled Error" 
err="object-\"openshift-config-operator\"/\"config-operator-serving-cert\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"config-operator-serving-cert\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-config-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 28 11:10:52 crc kubenswrapper[4923]: E1128 11:10:52.649841 4923 reflector.go:158] "Unhandled Error" err="object-\"openshift-authentication-operator\"/\"service-ca-bundle\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"service-ca-bundle\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-authentication-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 28 11:10:52 crc kubenswrapper[4923]: W1128 11:10:52.649797 4923 reflector.go:561] object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z": failed to list *v1.Secret: secrets "openshift-config-operator-dockercfg-7pc5z" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-config-operator": no relationship found between node 'crc' and this object Nov 28 11:10:52 crc kubenswrapper[4923]: W1128 11:10:52.649872 4923 reflector.go:561] object-"openshift-cluster-samples-operator"/"kube-root-ca.crt": failed to list *v1.ConfigMap: configmaps "kube-root-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-cluster-samples-operator": no relationship found between node 'crc' and this object Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.649886 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Nov 28 11:10:52 crc kubenswrapper[4923]: E1128 11:10:52.649891 4923 reflector.go:158] "Unhandled Error" err="object-\"openshift-config-operator\"/\"openshift-config-operator-dockercfg-7pc5z\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"openshift-config-operator-dockercfg-7pc5z\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-config-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.649582 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Nov 28 11:10:52 crc kubenswrapper[4923]: W1128 11:10:52.649908 4923 reflector.go:561] object-"openshift-authentication-operator"/"serving-cert": failed to list *v1.Secret: secrets "serving-cert" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-authentication-operator": no relationship found between node 'crc' and this object Nov 28 11:10:52 crc kubenswrapper[4923]: W1128 11:10:52.649968 4923 reflector.go:561] object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj": failed to list *v1.Secret: secrets "authentication-operator-dockercfg-mz9bj" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-authentication-operator": no relationship found between node 'crc' and this object Nov 28 11:10:52 crc kubenswrapper[4923]: E1128 11:10:52.649969 4923 
reflector.go:158] "Unhandled Error" err="object-\"openshift-authentication-operator\"/\"serving-cert\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"serving-cert\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-authentication-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 28 11:10:52 crc kubenswrapper[4923]: E1128 11:10:52.649982 4923 reflector.go:158] "Unhandled Error" err="object-\"openshift-authentication-operator\"/\"authentication-operator-dockercfg-mz9bj\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"authentication-operator-dockercfg-mz9bj\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-authentication-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.649927 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Nov 28 11:10:52 crc kubenswrapper[4923]: W1128 11:10:52.650001 4923 reflector.go:561] object-"openshift-authentication-operator"/"authentication-operator-config": failed to list *v1.ConfigMap: configmaps "authentication-operator-config" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-authentication-operator": no relationship found between node 'crc' and this object Nov 28 11:10:52 crc kubenswrapper[4923]: E1128 11:10:52.650021 4923 reflector.go:158] "Unhandled Error" err="object-\"openshift-authentication-operator\"/\"authentication-operator-config\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"authentication-operator-config\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-authentication-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 28 11:10:52 crc kubenswrapper[4923]: E1128 11:10:52.649887 4923 reflector.go:158] "Unhandled Error" err="object-\"openshift-cluster-samples-operator\"/\"kube-root-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"kube-root-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-cluster-samples-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.650041 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Nov 28 11:10:52 crc kubenswrapper[4923]: W1128 11:10:52.649600 4923 reflector.go:561] object-"openshift-console"/"trusted-ca-bundle": failed to list *v1.ConfigMap: configmaps "trusted-ca-bundle" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-console": no relationship found between node 'crc' and this object Nov 28 11:10:52 crc kubenswrapper[4923]: W1128 11:10:52.649619 4923 reflector.go:561] object-"openshift-config-operator"/"kube-root-ca.crt": failed to list *v1.ConfigMap: configmaps "kube-root-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-config-operator": no relationship found between node 'crc' and this 
object Nov 28 11:10:52 crc kubenswrapper[4923]: W1128 11:10:52.649971 4923 reflector.go:561] object-"openshift-authentication-operator"/"trusted-ca-bundle": failed to list *v1.ConfigMap: configmaps "trusted-ca-bundle" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-authentication-operator": no relationship found between node 'crc' and this object Nov 28 11:10:52 crc kubenswrapper[4923]: E1128 11:10:52.650084 4923 reflector.go:158] "Unhandled Error" err="object-\"openshift-config-operator\"/\"kube-root-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"kube-root-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-config-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 28 11:10:52 crc kubenswrapper[4923]: W1128 11:10:52.649639 4923 reflector.go:561] object-"openshift-console"/"service-ca": failed to list *v1.ConfigMap: configmaps "service-ca" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-console": no relationship found between node 'crc' and this object Nov 28 11:10:52 crc kubenswrapper[4923]: E1128 11:10:52.650090 4923 reflector.go:158] "Unhandled Error" err="object-\"openshift-authentication-operator\"/\"trusted-ca-bundle\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"trusted-ca-bundle\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-authentication-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 28 11:10:52 crc kubenswrapper[4923]: E1128 11:10:52.650065 4923 reflector.go:158] "Unhandled Error" err="object-\"openshift-console\"/\"trusted-ca-bundle\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"trusted-ca-bundle\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-console\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 28 11:10:52 crc kubenswrapper[4923]: E1128 11:10:52.650105 4923 reflector.go:158] "Unhandled Error" err="object-\"openshift-console\"/\"service-ca\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"service-ca\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-console\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 28 11:10:52 crc kubenswrapper[4923]: W1128 11:10:52.649532 4923 reflector.go:561] object-"openshift-route-controller-manager"/"serving-cert": failed to list *v1.Secret: secrets "serving-cert" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-route-controller-manager": no relationship found between node 'crc' and this object Nov 28 11:10:52 crc kubenswrapper[4923]: E1128 11:10:52.650363 4923 reflector.go:158] "Unhandled Error" err="object-\"openshift-route-controller-manager\"/\"serving-cert\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"serving-cert\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-route-controller-manager\": no relationship found between node 'crc' and this object" 
logger="UnhandledError" Nov 28 11:10:52 crc kubenswrapper[4923]: W1128 11:10:52.650545 4923 reflector.go:561] object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt": failed to list *v1.ConfigMap: configmaps "openshift-service-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-cluster-samples-operator": no relationship found between node 'crc' and this object Nov 28 11:10:52 crc kubenswrapper[4923]: E1128 11:10:52.650565 4923 reflector.go:158] "Unhandled Error" err="object-\"openshift-cluster-samples-operator\"/\"openshift-service-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"openshift-service-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-cluster-samples-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 28 11:10:52 crc kubenswrapper[4923]: E1128 11:10:52.649685 4923 reflector.go:158] "Unhandled Error" err="object-\"openshift-config-operator\"/\"openshift-service-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"openshift-service-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-config-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.657274 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-tz5lm" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.658418 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.659225 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Nov 28 11:10:52 crc kubenswrapper[4923]: W1128 11:10:52.659338 4923 reflector.go:561] object-"openshift-route-controller-manager"/"config": failed to list *v1.ConfigMap: configmaps "config" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-route-controller-manager": no relationship found between node 'crc' and this object Nov 28 11:10:52 crc kubenswrapper[4923]: E1128 11:10:52.659375 4923 reflector.go:158] "Unhandled Error" err="object-\"openshift-route-controller-manager\"/\"config\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"config\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-route-controller-manager\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 28 11:10:52 crc kubenswrapper[4923]: W1128 11:10:52.659390 4923 reflector.go:561] object-"openshift-console"/"kube-root-ca.crt": failed to list *v1.ConfigMap: configmaps "kube-root-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-console": no relationship found between node 'crc' and this object Nov 28 11:10:52 crc kubenswrapper[4923]: E1128 11:10:52.659413 4923 reflector.go:158] "Unhandled Error" err="object-\"openshift-console\"/\"kube-root-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"kube-root-ca.crt\" 
is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-console\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 28 11:10:52 crc kubenswrapper[4923]: W1128 11:10:52.659430 4923 reflector.go:561] object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2": failed to list *v1.Secret: secrets "route-controller-manager-sa-dockercfg-h2zr2" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-route-controller-manager": no relationship found between node 'crc' and this object Nov 28 11:10:52 crc kubenswrapper[4923]: W1128 11:10:52.659453 4923 reflector.go:561] object-"openshift-console"/"openshift-service-ca.crt": failed to list *v1.ConfigMap: configmaps "openshift-service-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-console": no relationship found between node 'crc' and this object Nov 28 11:10:52 crc kubenswrapper[4923]: E1128 11:10:52.659466 4923 reflector.go:158] "Unhandled Error" err="object-\"openshift-console\"/\"openshift-service-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"openshift-service-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-console\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 28 11:10:52 crc kubenswrapper[4923]: E1128 11:10:52.659448 4923 reflector.go:158] "Unhandled Error" err="object-\"openshift-route-controller-manager\"/\"route-controller-manager-sa-dockercfg-h2zr2\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"route-controller-manager-sa-dockercfg-h2zr2\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-route-controller-manager\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 28 11:10:52 crc kubenswrapper[4923]: W1128 11:10:52.659491 4923 reflector.go:561] object-"openshift-console"/"console-dockercfg-f62pw": failed to list *v1.Secret: secrets "console-dockercfg-f62pw" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-console": no relationship found between node 'crc' and this object Nov 28 11:10:52 crc kubenswrapper[4923]: E1128 11:10:52.659506 4923 reflector.go:158] "Unhandled Error" err="object-\"openshift-console\"/\"console-dockercfg-f62pw\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"console-dockercfg-f62pw\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-console\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 28 11:10:52 crc kubenswrapper[4923]: W1128 11:10:52.659531 4923 reflector.go:561] object-"openshift-route-controller-manager"/"openshift-service-ca.crt": failed to list *v1.ConfigMap: configmaps "openshift-service-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-route-controller-manager": no relationship found between node 'crc' and this object Nov 28 11:10:52 crc kubenswrapper[4923]: E1128 11:10:52.659571 4923 reflector.go:158] "Unhandled Error" err="object-\"openshift-route-controller-manager\"/\"openshift-service-ca.crt\": Failed 
to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"openshift-service-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-route-controller-manager\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 28 11:10:52 crc kubenswrapper[4923]: W1128 11:10:52.660335 4923 reflector.go:561] object-"openshift-console"/"console-oauth-config": failed to list *v1.Secret: secrets "console-oauth-config" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-console": no relationship found between node 'crc' and this object Nov 28 11:10:52 crc kubenswrapper[4923]: E1128 11:10:52.660363 4923 reflector.go:158] "Unhandled Error" err="object-\"openshift-console\"/\"console-oauth-config\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"console-oauth-config\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-console\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 28 11:10:52 crc kubenswrapper[4923]: W1128 11:10:52.660377 4923 reflector.go:561] object-"openshift-route-controller-manager"/"client-ca": failed to list *v1.ConfigMap: configmaps "client-ca" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-route-controller-manager": no relationship found between node 'crc' and this object Nov 28 11:10:52 crc kubenswrapper[4923]: E1128 11:10:52.660403 4923 reflector.go:158] "Unhandled Error" err="object-\"openshift-route-controller-manager\"/\"client-ca\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"client-ca\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-route-controller-manager\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 28 11:10:52 crc kubenswrapper[4923]: W1128 11:10:52.660425 4923 reflector.go:561] object-"openshift-console"/"oauth-serving-cert": failed to list *v1.ConfigMap: configmaps "oauth-serving-cert" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-console": no relationship found between node 'crc' and this object Nov 28 11:10:52 crc kubenswrapper[4923]: E1128 11:10:52.660448 4923 reflector.go:158] "Unhandled Error" err="object-\"openshift-console\"/\"oauth-serving-cert\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"oauth-serving-cert\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-console\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 28 11:10:52 crc kubenswrapper[4923]: W1128 11:10:52.660631 4923 reflector.go:561] object-"openshift-console"/"console-serving-cert": failed to list *v1.Secret: secrets "console-serving-cert" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-console": no relationship found between node 'crc' and this object Nov 28 11:10:52 crc kubenswrapper[4923]: E1128 11:10:52.660719 4923 reflector.go:158] "Unhandled Error" err="object-\"openshift-console\"/\"console-serving-cert\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"console-serving-cert\" is forbidden: User 
\"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-console\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 28 11:10:52 crc kubenswrapper[4923]: W1128 11:10:52.660836 4923 reflector.go:561] object-"openshift-console"/"console-config": failed to list *v1.ConfigMap: configmaps "console-config" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-console": no relationship found between node 'crc' and this object Nov 28 11:10:52 crc kubenswrapper[4923]: E1128 11:10:52.660907 4923 reflector.go:158] "Unhandled Error" err="object-\"openshift-console\"/\"console-config\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"console-config\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-console\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.661300 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.661524 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.661813 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Nov 28 11:10:52 crc kubenswrapper[4923]: W1128 11:10:52.661826 4923 reflector.go:561] object-"openshift-authentication-operator"/"kube-root-ca.crt": failed to list *v1.ConfigMap: configmaps "kube-root-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-authentication-operator": no relationship found between node 'crc' and this object Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.661852 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.661913 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Nov 28 11:10:52 crc kubenswrapper[4923]: E1128 11:10:52.661850 4923 reflector.go:158] "Unhandled Error" err="object-\"openshift-authentication-operator\"/\"kube-root-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"kube-root-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-authentication-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.663285 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-ckhbm"] Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.663811 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-fd2jt"] Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.664206 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-fd2jt" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.664516 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-ckhbm" Nov 28 11:10:52 crc kubenswrapper[4923]: W1128 11:10:52.666657 4923 reflector.go:561] object-"openshift-route-controller-manager"/"kube-root-ca.crt": failed to list *v1.ConfigMap: configmaps "kube-root-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-route-controller-manager": no relationship found between node 'crc' and this object Nov 28 11:10:52 crc kubenswrapper[4923]: E1128 11:10:52.666682 4923 reflector.go:158] "Unhandled Error" err="object-\"openshift-route-controller-manager\"/\"kube-root-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"kube-root-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-route-controller-manager\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.667000 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Nov 28 11:10:52 crc kubenswrapper[4923]: W1128 11:10:52.667630 4923 reflector.go:561] object-"openshift-cluster-samples-operator"/"samples-operator-tls": failed to list *v1.Secret: secrets "samples-operator-tls" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-cluster-samples-operator": no relationship found between node 'crc' and this object Nov 28 11:10:52 crc kubenswrapper[4923]: E1128 11:10:52.667654 4923 reflector.go:158] "Unhandled Error" err="object-\"openshift-cluster-samples-operator\"/\"samples-operator-tls\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"samples-operator-tls\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-cluster-samples-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.669189 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-6t9g2"] Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.669558 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-6t9g2" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.669553 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-g855d"] Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.670063 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-g855d" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.670544 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-w99dl"] Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.671194 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-w99dl" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.671685 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-jhjvz"] Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.671973 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-jhjvz" Nov 28 11:10:52 crc kubenswrapper[4923]: W1128 11:10:52.675777 4923 reflector.go:561] object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr": failed to list *v1.Secret: secrets "console-operator-dockercfg-4xjcr" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-console-operator": no relationship found between node 'crc' and this object Nov 28 11:10:52 crc kubenswrapper[4923]: E1128 11:10:52.675861 4923 reflector.go:158] "Unhandled Error" err="object-\"openshift-console-operator\"/\"console-operator-dockercfg-4xjcr\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"console-operator-dockercfg-4xjcr\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-console-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.681654 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-dfffg"] Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.682176 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-dfffg" Nov 28 11:10:52 crc kubenswrapper[4923]: W1128 11:10:52.683643 4923 reflector.go:561] object-"openshift-console"/"default-dockercfg-chnjx": failed to list *v1.Secret: secrets "default-dockercfg-chnjx" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-console": no relationship found between node 'crc' and this object Nov 28 11:10:52 crc kubenswrapper[4923]: E1128 11:10:52.683753 4923 reflector.go:158] "Unhandled Error" err="object-\"openshift-console\"/\"default-dockercfg-chnjx\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"default-dockercfg-chnjx\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-console\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 28 11:10:52 crc kubenswrapper[4923]: W1128 11:10:52.683749 4923 reflector.go:561] object-"openshift-cluster-machine-approver"/"kube-rbac-proxy": failed to list *v1.ConfigMap: configmaps "kube-rbac-proxy" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-cluster-machine-approver": no relationship found between node 'crc' and this object Nov 28 11:10:52 crc kubenswrapper[4923]: W1128 11:10:52.683867 4923 reflector.go:561] object-"openshift-cluster-machine-approver"/"machine-approver-config": failed to list *v1.ConfigMap: configmaps "machine-approver-config" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-cluster-machine-approver": no relationship found between node 'crc' and this object Nov 28 11:10:52 crc kubenswrapper[4923]: E1128 
11:10:52.683898 4923 reflector.go:158] "Unhandled Error" err="object-\"openshift-cluster-machine-approver\"/\"machine-approver-config\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"machine-approver-config\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-cluster-machine-approver\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.683719 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Nov 28 11:10:52 crc kubenswrapper[4923]: E1128 11:10:52.683878 4923 reflector.go:158] "Unhandled Error" err="object-\"openshift-cluster-machine-approver\"/\"kube-rbac-proxy\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"kube-rbac-proxy\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-cluster-machine-approver\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.683775 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Nov 28 11:10:52 crc kubenswrapper[4923]: W1128 11:10:52.688331 4923 reflector.go:561] object-"openshift-console-operator"/"openshift-service-ca.crt": failed to list *v1.ConfigMap: configmaps "openshift-service-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-console-operator": no relationship found between node 'crc' and this object Nov 28 11:10:52 crc kubenswrapper[4923]: E1128 11:10:52.688369 4923 reflector.go:158] "Unhandled Error" err="object-\"openshift-console-operator\"/\"openshift-service-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"openshift-service-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-console-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 28 11:10:52 crc kubenswrapper[4923]: W1128 11:10:52.688434 4923 reflector.go:561] object-"openshift-cluster-machine-approver"/"machine-approver-tls": failed to list *v1.Secret: secrets "machine-approver-tls" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-cluster-machine-approver": no relationship found between node 'crc' and this object Nov 28 11:10:52 crc kubenswrapper[4923]: E1128 11:10:52.688447 4923 reflector.go:158] "Unhandled Error" err="object-\"openshift-cluster-machine-approver\"/\"machine-approver-tls\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"machine-approver-tls\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-cluster-machine-approver\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 28 11:10:52 crc kubenswrapper[4923]: W1128 11:10:52.688650 4923 reflector.go:561] object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4": failed to list *v1.Secret: secrets "machine-approver-sa-dockercfg-nl2j4" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-cluster-machine-approver": no relationship found between 
node 'crc' and this object Nov 28 11:10:52 crc kubenswrapper[4923]: E1128 11:10:52.688665 4923 reflector.go:158] "Unhandled Error" err="object-\"openshift-cluster-machine-approver\"/\"machine-approver-sa-dockercfg-nl2j4\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"machine-approver-sa-dockercfg-nl2j4\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-cluster-machine-approver\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 28 11:10:52 crc kubenswrapper[4923]: W1128 11:10:52.688702 4923 reflector.go:561] object-"openshift-cluster-machine-approver"/"kube-root-ca.crt": failed to list *v1.ConfigMap: configmaps "kube-root-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-cluster-machine-approver": no relationship found between node 'crc' and this object Nov 28 11:10:52 crc kubenswrapper[4923]: E1128 11:10:52.688712 4923 reflector.go:158] "Unhandled Error" err="object-\"openshift-cluster-machine-approver\"/\"kube-root-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"kube-root-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-cluster-machine-approver\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 28 11:10:52 crc kubenswrapper[4923]: W1128 11:10:52.688744 4923 reflector.go:561] object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt": failed to list *v1.ConfigMap: configmaps "openshift-service-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-cluster-machine-approver": no relationship found between node 'crc' and this object Nov 28 11:10:52 crc kubenswrapper[4923]: E1128 11:10:52.688754 4923 reflector.go:158] "Unhandled Error" err="object-\"openshift-cluster-machine-approver\"/\"openshift-service-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"openshift-service-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-cluster-machine-approver\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 28 11:10:52 crc kubenswrapper[4923]: W1128 11:10:52.689013 4923 reflector.go:561] object-"openshift-image-registry"/"trusted-ca": failed to list *v1.ConfigMap: configmaps "trusted-ca" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-image-registry": no relationship found between node 'crc' and this object Nov 28 11:10:52 crc kubenswrapper[4923]: E1128 11:10:52.689031 4923 reflector.go:158] "Unhandled Error" err="object-\"openshift-image-registry\"/\"trusted-ca\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"trusted-ca\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-image-registry\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.689315 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.689447 4923 reflector.go:368] Caches populated for *v1.ConfigMap 
from object-"openshift-console-operator"/"console-operator-config" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.690002 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Nov 28 11:10:52 crc kubenswrapper[4923]: W1128 11:10:52.690234 4923 reflector.go:561] object-"openshift-image-registry"/"installation-pull-secrets": failed to list *v1.Secret: secrets "installation-pull-secrets" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-image-registry": no relationship found between node 'crc' and this object Nov 28 11:10:52 crc kubenswrapper[4923]: E1128 11:10:52.690315 4923 reflector.go:158] "Unhandled Error" err="object-\"openshift-image-registry\"/\"installation-pull-secrets\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"installation-pull-secrets\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-image-registry\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.696173 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.697231 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.698549 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hfvn9"] Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.700313 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hfvn9" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.730479 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Nov 28 11:10:52 crc kubenswrapper[4923]: W1128 11:10:52.732132 4923 reflector.go:561] object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx": failed to list *v1.Secret: secrets "cluster-image-registry-operator-dockercfg-m4qtx" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-image-registry": no relationship found between node 'crc' and this object Nov 28 11:10:52 crc kubenswrapper[4923]: E1128 11:10:52.732184 4923 reflector.go:158] "Unhandled Error" err="object-\"openshift-image-registry\"/\"cluster-image-registry-operator-dockercfg-m4qtx\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"cluster-image-registry-operator-dockercfg-m4qtx\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-image-registry\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.734988 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.736380 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-kwrnm"] Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.736724 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.736840 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6vdll"] Nov 28 11:10:52 crc kubenswrapper[4923]: W1128 11:10:52.736995 4923 reflector.go:561] object-"openshift-image-registry"/"image-registry-operator-tls": failed to list *v1.Secret: secrets "image-registry-operator-tls" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-image-registry": no relationship found between node 'crc' and this object Nov 28 11:10:52 crc kubenswrapper[4923]: E1128 11:10:52.737030 4923 reflector.go:158] "Unhandled Error" err="object-\"openshift-image-registry\"/\"image-registry-operator-tls\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"image-registry-operator-tls\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-image-registry\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.737164 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6vdll" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.737294 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7a3aa8b5-2de7-476e-bc85-dbbe5281e8a8-serving-cert\") pod \"controller-manager-879f6c89f-7l2lz\" (UID: \"7a3aa8b5-2de7-476e-bc85-dbbe5281e8a8\") " pod="openshift-controller-manager/controller-manager-879f6c89f-7l2lz" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.737323 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c708f3f9-1c78-43ee-8630-add159a49c49-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-pbbcs\" (UID: \"c708f3f9-1c78-43ee-8630-add159a49c49\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pbbcs" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.737342 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4926f4a2-0ee6-444b-a113-f6ee1d162d72-config\") pod \"authentication-operator-69f744f599-cbtlt\" (UID: \"4926f4a2-0ee6-444b-a113-f6ee1d162d72\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-cbtlt" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.737362 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/c708f3f9-1c78-43ee-8630-add159a49c49-encryption-config\") pod \"apiserver-7bbb656c7d-pbbcs\" (UID: \"c708f3f9-1c78-43ee-8630-add159a49c49\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pbbcs" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.737382 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-kwrnm" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.737380 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/ad44a891-fc97-4154-8f93-bbd276c5c18a-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-28cv6\" (UID: \"ad44a891-fc97-4154-8f93-bbd276c5c18a\") " pod="openshift-authentication/oauth-openshift-558db77b4-28cv6" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.737503 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/c708f3f9-1c78-43ee-8630-add159a49c49-audit-policies\") pod \"apiserver-7bbb656c7d-pbbcs\" (UID: \"c708f3f9-1c78-43ee-8630-add159a49c49\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pbbcs" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.737527 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/37ddbfb0-c042-460d-b772-9cdd214a79a1-audit\") pod \"apiserver-76f77b778f-lhbv8\" (UID: \"37ddbfb0-c042-460d-b772-9cdd214a79a1\") " pod="openshift-apiserver/apiserver-76f77b778f-lhbv8" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.737546 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/37ddbfb0-c042-460d-b772-9cdd214a79a1-audit-dir\") pod \"apiserver-76f77b778f-lhbv8\" (UID: \"37ddbfb0-c042-460d-b772-9cdd214a79a1\") " pod="openshift-apiserver/apiserver-76f77b778f-lhbv8" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.737566 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c3641d9d-5e9b-40a7-90ec-4fa7b3f42a4b-config\") pod \"machine-api-operator-5694c8668f-m4snv\" (UID: \"c3641d9d-5e9b-40a7-90ec-4fa7b3f42a4b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-m4snv" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.737586 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/cdedfd6e-9082-4411-b128-fc9806c67bd3-client-ca\") pod \"route-controller-manager-6576b87f9c-p7flx\" (UID: \"cdedfd6e-9082-4411-b128-fc9806c67bd3\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-p7flx" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.737605 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/af02ba5e-e22d-4363-a5e7-6a819c881eeb-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-w6d2b\" (UID: \"af02ba5e-e22d-4363-a5e7-6a819c881eeb\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-w6d2b" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.737623 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7a3aa8b5-2de7-476e-bc85-dbbe5281e8a8-client-ca\") pod \"controller-manager-879f6c89f-7l2lz\" (UID: \"7a3aa8b5-2de7-476e-bc85-dbbe5281e8a8\") " pod="openshift-controller-manager/controller-manager-879f6c89f-7l2lz" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.737638 4923 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7a3aa8b5-2de7-476e-bc85-dbbe5281e8a8-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-7l2lz\" (UID: \"7a3aa8b5-2de7-476e-bc85-dbbe5281e8a8\") " pod="openshift-controller-manager/controller-manager-879f6c89f-7l2lz" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.737657 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4926f4a2-0ee6-444b-a113-f6ee1d162d72-serving-cert\") pod \"authentication-operator-69f744f599-cbtlt\" (UID: \"4926f4a2-0ee6-444b-a113-f6ee1d162d72\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-cbtlt" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.737672 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4926f4a2-0ee6-444b-a113-f6ee1d162d72-service-ca-bundle\") pod \"authentication-operator-69f744f599-cbtlt\" (UID: \"4926f4a2-0ee6-444b-a113-f6ee1d162d72\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-cbtlt" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.737687 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/aa97fc63-7e09-4217-9fb9-78fca4703f04-console-serving-cert\") pod \"console-f9d7485db-2vsdg\" (UID: \"aa97fc63-7e09-4217-9fb9-78fca4703f04\") " pod="openshift-console/console-f9d7485db-2vsdg" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.737702 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/aa97fc63-7e09-4217-9fb9-78fca4703f04-service-ca\") pod \"console-f9d7485db-2vsdg\" (UID: \"aa97fc63-7e09-4217-9fb9-78fca4703f04\") " pod="openshift-console/console-f9d7485db-2vsdg" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.737719 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/e7f08880-f8db-4170-8d1d-1bccb2df10f4-available-featuregates\") pod \"openshift-config-operator-7777fb866f-mqbzf\" (UID: \"e7f08880-f8db-4170-8d1d-1bccb2df10f4\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-mqbzf" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.737748 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/c3641d9d-5e9b-40a7-90ec-4fa7b3f42a4b-images\") pod \"machine-api-operator-5694c8668f-m4snv\" (UID: \"c3641d9d-5e9b-40a7-90ec-4fa7b3f42a4b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-m4snv" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.737765 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j2n7j\" (UniqueName: \"kubernetes.io/projected/e7f08880-f8db-4170-8d1d-1bccb2df10f4-kube-api-access-j2n7j\") pod \"openshift-config-operator-7777fb866f-mqbzf\" (UID: \"e7f08880-f8db-4170-8d1d-1bccb2df10f4\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-mqbzf" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.737780 4923 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/aa97fc63-7e09-4217-9fb9-78fca4703f04-console-config\") pod \"console-f9d7485db-2vsdg\" (UID: \"aa97fc63-7e09-4217-9fb9-78fca4703f04\") " pod="openshift-console/console-f9d7485db-2vsdg" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.737796 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cdedfd6e-9082-4411-b128-fc9806c67bd3-config\") pod \"route-controller-manager-6576b87f9c-p7flx\" (UID: \"cdedfd6e-9082-4411-b128-fc9806c67bd3\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-p7flx" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.737813 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cdedfd6e-9082-4411-b128-fc9806c67bd3-serving-cert\") pod \"route-controller-manager-6576b87f9c-p7flx\" (UID: \"cdedfd6e-9082-4411-b128-fc9806c67bd3\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-p7flx" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.737831 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/37ddbfb0-c042-460d-b772-9cdd214a79a1-etcd-client\") pod \"apiserver-76f77b778f-lhbv8\" (UID: \"37ddbfb0-c042-460d-b772-9cdd214a79a1\") " pod="openshift-apiserver/apiserver-76f77b778f-lhbv8" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.737857 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/aa97fc63-7e09-4217-9fb9-78fca4703f04-oauth-serving-cert\") pod \"console-f9d7485db-2vsdg\" (UID: \"aa97fc63-7e09-4217-9fb9-78fca4703f04\") " pod="openshift-console/console-f9d7485db-2vsdg" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.737873 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/abd99a1a-b8bf-44a5-82c4-a44a0c32a7ff-config\") pod \"console-operator-58897d9998-tz5lm\" (UID: \"abd99a1a-b8bf-44a5-82c4-a44a0c32a7ff\") " pod="openshift-console-operator/console-operator-58897d9998-tz5lm" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.737901 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6bdxq\" (UniqueName: \"kubernetes.io/projected/c3641d9d-5e9b-40a7-90ec-4fa7b3f42a4b-kube-api-access-6bdxq\") pod \"machine-api-operator-5694c8668f-m4snv\" (UID: \"c3641d9d-5e9b-40a7-90ec-4fa7b3f42a4b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-m4snv" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.737919 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/ad44a891-fc97-4154-8f93-bbd276c5c18a-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-28cv6\" (UID: \"ad44a891-fc97-4154-8f93-bbd276c5c18a\") " pod="openshift-authentication/oauth-openshift-558db77b4-28cv6" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.737951 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7vtcd\" (UniqueName: 
\"kubernetes.io/projected/ad44a891-fc97-4154-8f93-bbd276c5c18a-kube-api-access-7vtcd\") pod \"oauth-openshift-558db77b4-28cv6\" (UID: \"ad44a891-fc97-4154-8f93-bbd276c5c18a\") " pod="openshift-authentication/oauth-openshift-558db77b4-28cv6" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.737968 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/ad44a891-fc97-4154-8f93-bbd276c5c18a-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-28cv6\" (UID: \"ad44a891-fc97-4154-8f93-bbd276c5c18a\") " pod="openshift-authentication/oauth-openshift-558db77b4-28cv6" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.737985 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s4l8c\" (UniqueName: \"kubernetes.io/projected/c708f3f9-1c78-43ee-8630-add159a49c49-kube-api-access-s4l8c\") pod \"apiserver-7bbb656c7d-pbbcs\" (UID: \"c708f3f9-1c78-43ee-8630-add159a49c49\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pbbcs" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.738000 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/c3641d9d-5e9b-40a7-90ec-4fa7b3f42a4b-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-m4snv\" (UID: \"c3641d9d-5e9b-40a7-90ec-4fa7b3f42a4b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-m4snv" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.738016 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/889057c8-1eb2-4829-b1d5-a906b88eb68c-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-j6dnf\" (UID: \"889057c8-1eb2-4829-b1d5-a906b88eb68c\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-j6dnf" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.738033 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/ad44a891-fc97-4154-8f93-bbd276c5c18a-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-28cv6\" (UID: \"ad44a891-fc97-4154-8f93-bbd276c5c18a\") " pod="openshift-authentication/oauth-openshift-558db77b4-28cv6" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.738052 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7f08880-f8db-4170-8d1d-1bccb2df10f4-serving-cert\") pod \"openshift-config-operator-7777fb866f-mqbzf\" (UID: \"e7f08880-f8db-4170-8d1d-1bccb2df10f4\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-mqbzf" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.738072 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wwmwf\" (UniqueName: \"kubernetes.io/projected/aa97fc63-7e09-4217-9fb9-78fca4703f04-kube-api-access-wwmwf\") pod \"console-f9d7485db-2vsdg\" (UID: \"aa97fc63-7e09-4217-9fb9-78fca4703f04\") " pod="openshift-console/console-f9d7485db-2vsdg" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.738086 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rhqs5\" (UniqueName: 
\"kubernetes.io/projected/abd99a1a-b8bf-44a5-82c4-a44a0c32a7ff-kube-api-access-rhqs5\") pod \"console-operator-58897d9998-tz5lm\" (UID: \"abd99a1a-b8bf-44a5-82c4-a44a0c32a7ff\") " pod="openshift-console-operator/console-operator-58897d9998-tz5lm" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.738106 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/37ddbfb0-c042-460d-b772-9cdd214a79a1-serving-cert\") pod \"apiserver-76f77b778f-lhbv8\" (UID: \"37ddbfb0-c042-460d-b772-9cdd214a79a1\") " pod="openshift-apiserver/apiserver-76f77b778f-lhbv8" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.738121 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dldh9\" (UniqueName: \"kubernetes.io/projected/4926f4a2-0ee6-444b-a113-f6ee1d162d72-kube-api-access-dldh9\") pod \"authentication-operator-69f744f599-cbtlt\" (UID: \"4926f4a2-0ee6-444b-a113-f6ee1d162d72\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-cbtlt" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.738136 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tg6gz\" (UniqueName: \"kubernetes.io/projected/af02ba5e-e22d-4363-a5e7-6a819c881eeb-kube-api-access-tg6gz\") pod \"openshift-apiserver-operator-796bbdcf4f-w6d2b\" (UID: \"af02ba5e-e22d-4363-a5e7-6a819c881eeb\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-w6d2b" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.738151 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/ad44a891-fc97-4154-8f93-bbd276c5c18a-audit-policies\") pod \"oauth-openshift-558db77b4-28cv6\" (UID: \"ad44a891-fc97-4154-8f93-bbd276c5c18a\") " pod="openshift-authentication/oauth-openshift-558db77b4-28cv6" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.738178 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/37ddbfb0-c042-460d-b772-9cdd214a79a1-image-import-ca\") pod \"apiserver-76f77b778f-lhbv8\" (UID: \"37ddbfb0-c042-460d-b772-9cdd214a79a1\") " pod="openshift-apiserver/apiserver-76f77b778f-lhbv8" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.738196 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rvb8s\" (UniqueName: \"kubernetes.io/projected/cdedfd6e-9082-4411-b128-fc9806c67bd3-kube-api-access-rvb8s\") pod \"route-controller-manager-6576b87f9c-p7flx\" (UID: \"cdedfd6e-9082-4411-b128-fc9806c67bd3\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-p7flx" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.738212 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ad44a891-fc97-4154-8f93-bbd276c5c18a-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-28cv6\" (UID: \"ad44a891-fc97-4154-8f93-bbd276c5c18a\") " pod="openshift-authentication/oauth-openshift-558db77b4-28cv6" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.738227 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" 
(UniqueName: \"kubernetes.io/secret/ad44a891-fc97-4154-8f93-bbd276c5c18a-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-28cv6\" (UID: \"ad44a891-fc97-4154-8f93-bbd276c5c18a\") " pod="openshift-authentication/oauth-openshift-558db77b4-28cv6" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.738246 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/abd99a1a-b8bf-44a5-82c4-a44a0c32a7ff-serving-cert\") pod \"console-operator-58897d9998-tz5lm\" (UID: \"abd99a1a-b8bf-44a5-82c4-a44a0c32a7ff\") " pod="openshift-console-operator/console-operator-58897d9998-tz5lm" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.738268 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/37ddbfb0-c042-460d-b772-9cdd214a79a1-config\") pod \"apiserver-76f77b778f-lhbv8\" (UID: \"37ddbfb0-c042-460d-b772-9cdd214a79a1\") " pod="openshift-apiserver/apiserver-76f77b778f-lhbv8" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.738284 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/ad44a891-fc97-4154-8f93-bbd276c5c18a-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-28cv6\" (UID: \"ad44a891-fc97-4154-8f93-bbd276c5c18a\") " pod="openshift-authentication/oauth-openshift-558db77b4-28cv6" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.738301 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/af02ba5e-e22d-4363-a5e7-6a819c881eeb-config\") pod \"openshift-apiserver-operator-796bbdcf4f-w6d2b\" (UID: \"af02ba5e-e22d-4363-a5e7-6a819c881eeb\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-w6d2b" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.738316 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/37ddbfb0-c042-460d-b772-9cdd214a79a1-trusted-ca-bundle\") pod \"apiserver-76f77b778f-lhbv8\" (UID: \"37ddbfb0-c042-460d-b772-9cdd214a79a1\") " pod="openshift-apiserver/apiserver-76f77b778f-lhbv8" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.738333 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/aa97fc63-7e09-4217-9fb9-78fca4703f04-console-oauth-config\") pod \"console-f9d7485db-2vsdg\" (UID: \"aa97fc63-7e09-4217-9fb9-78fca4703f04\") " pod="openshift-console/console-f9d7485db-2vsdg" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.738349 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/37ddbfb0-c042-460d-b772-9cdd214a79a1-node-pullsecrets\") pod \"apiserver-76f77b778f-lhbv8\" (UID: \"37ddbfb0-c042-460d-b772-9cdd214a79a1\") " pod="openshift-apiserver/apiserver-76f77b778f-lhbv8" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.738366 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7a3aa8b5-2de7-476e-bc85-dbbe5281e8a8-config\") pod \"controller-manager-879f6c89f-7l2lz\" (UID: \"7a3aa8b5-2de7-476e-bc85-dbbe5281e8a8\") " 
pod="openshift-controller-manager/controller-manager-879f6c89f-7l2lz" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.738387 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/ad44a891-fc97-4154-8f93-bbd276c5c18a-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-28cv6\" (UID: \"ad44a891-fc97-4154-8f93-bbd276c5c18a\") " pod="openshift-authentication/oauth-openshift-558db77b4-28cv6" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.738405 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/c708f3f9-1c78-43ee-8630-add159a49c49-etcd-client\") pod \"apiserver-7bbb656c7d-pbbcs\" (UID: \"c708f3f9-1c78-43ee-8630-add159a49c49\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pbbcs" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.738420 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4926f4a2-0ee6-444b-a113-f6ee1d162d72-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-cbtlt\" (UID: \"4926f4a2-0ee6-444b-a113-f6ee1d162d72\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-cbtlt" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.738444 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-phrq8\" (UniqueName: \"kubernetes.io/projected/889057c8-1eb2-4829-b1d5-a906b88eb68c-kube-api-access-phrq8\") pod \"cluster-samples-operator-665b6dd947-j6dnf\" (UID: \"889057c8-1eb2-4829-b1d5-a906b88eb68c\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-j6dnf" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.738461 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/aa97fc63-7e09-4217-9fb9-78fca4703f04-trusted-ca-bundle\") pod \"console-f9d7485db-2vsdg\" (UID: \"aa97fc63-7e09-4217-9fb9-78fca4703f04\") " pod="openshift-console/console-f9d7485db-2vsdg" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.738480 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/37ddbfb0-c042-460d-b772-9cdd214a79a1-encryption-config\") pod \"apiserver-76f77b778f-lhbv8\" (UID: \"37ddbfb0-c042-460d-b772-9cdd214a79a1\") " pod="openshift-apiserver/apiserver-76f77b778f-lhbv8" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.738496 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8k6g4\" (UniqueName: \"kubernetes.io/projected/37ddbfb0-c042-460d-b772-9cdd214a79a1-kube-api-access-8k6g4\") pod \"apiserver-76f77b778f-lhbv8\" (UID: \"37ddbfb0-c042-460d-b772-9cdd214a79a1\") " pod="openshift-apiserver/apiserver-76f77b778f-lhbv8" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.738513 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/ad44a891-fc97-4154-8f93-bbd276c5c18a-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-28cv6\" (UID: \"ad44a891-fc97-4154-8f93-bbd276c5c18a\") " pod="openshift-authentication/oauth-openshift-558db77b4-28cv6" 
Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.738533 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/abd99a1a-b8bf-44a5-82c4-a44a0c32a7ff-trusted-ca\") pod \"console-operator-58897d9998-tz5lm\" (UID: \"abd99a1a-b8bf-44a5-82c4-a44a0c32a7ff\") " pod="openshift-console-operator/console-operator-58897d9998-tz5lm" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.738551 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/37ddbfb0-c042-460d-b772-9cdd214a79a1-etcd-serving-ca\") pod \"apiserver-76f77b778f-lhbv8\" (UID: \"37ddbfb0-c042-460d-b772-9cdd214a79a1\") " pod="openshift-apiserver/apiserver-76f77b778f-lhbv8" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.738568 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/c708f3f9-1c78-43ee-8630-add159a49c49-audit-dir\") pod \"apiserver-7bbb656c7d-pbbcs\" (UID: \"c708f3f9-1c78-43ee-8630-add159a49c49\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pbbcs" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.738585 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/ad44a891-fc97-4154-8f93-bbd276c5c18a-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-28cv6\" (UID: \"ad44a891-fc97-4154-8f93-bbd276c5c18a\") " pod="openshift-authentication/oauth-openshift-558db77b4-28cv6" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.738601 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/c708f3f9-1c78-43ee-8630-add159a49c49-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-pbbcs\" (UID: \"c708f3f9-1c78-43ee-8630-add159a49c49\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pbbcs" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.738617 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/ad44a891-fc97-4154-8f93-bbd276c5c18a-audit-dir\") pod \"oauth-openshift-558db77b4-28cv6\" (UID: \"ad44a891-fc97-4154-8f93-bbd276c5c18a\") " pod="openshift-authentication/oauth-openshift-558db77b4-28cv6" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.738636 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bbhbm\" (UniqueName: \"kubernetes.io/projected/7a3aa8b5-2de7-476e-bc85-dbbe5281e8a8-kube-api-access-bbhbm\") pod \"controller-manager-879f6c89f-7l2lz\" (UID: \"7a3aa8b5-2de7-476e-bc85-dbbe5281e8a8\") " pod="openshift-controller-manager/controller-manager-879f6c89f-7l2lz" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.738652 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c708f3f9-1c78-43ee-8630-add159a49c49-serving-cert\") pod \"apiserver-7bbb656c7d-pbbcs\" (UID: \"c708f3f9-1c78-43ee-8630-add159a49c49\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pbbcs" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.738672 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: 
\"kubernetes.io/configmap/ad44a891-fc97-4154-8f93-bbd276c5c18a-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-28cv6\" (UID: \"ad44a891-fc97-4154-8f93-bbd276c5c18a\") " pod="openshift-authentication/oauth-openshift-558db77b4-28cv6" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.738703 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c708f3f9-1c78-43ee-8630-add159a49c49-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-pbbcs\" (UID: \"c708f3f9-1c78-43ee-8630-add159a49c49\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pbbcs" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.738798 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c3641d9d-5e9b-40a7-90ec-4fa7b3f42a4b-config\") pod \"machine-api-operator-5694c8668f-m4snv\" (UID: \"c3641d9d-5e9b-40a7-90ec-4fa7b3f42a4b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-m4snv" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.738175 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/c708f3f9-1c78-43ee-8630-add159a49c49-audit-policies\") pod \"apiserver-7bbb656c7d-pbbcs\" (UID: \"c708f3f9-1c78-43ee-8630-add159a49c49\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pbbcs" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.738957 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.738247 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/37ddbfb0-c042-460d-b772-9cdd214a79a1-audit-dir\") pod \"apiserver-76f77b778f-lhbv8\" (UID: \"37ddbfb0-c042-460d-b772-9cdd214a79a1\") " pod="openshift-apiserver/apiserver-76f77b778f-lhbv8" Nov 28 11:10:52 crc kubenswrapper[4923]: W1128 11:10:52.739105 4923 reflector.go:561] object-"openshift-console-operator"/"trusted-ca": failed to list *v1.ConfigMap: configmaps "trusted-ca" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-console-operator": no relationship found between node 'crc' and this object Nov 28 11:10:52 crc kubenswrapper[4923]: E1128 11:10:52.739130 4923 reflector.go:158] "Unhandled Error" err="object-\"openshift-console-operator\"/\"trusted-ca\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"trusted-ca\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-console-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.739598 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7a3aa8b5-2de7-476e-bc85-dbbe5281e8a8-client-ca\") pod \"controller-manager-879f6c89f-7l2lz\" (UID: \"7a3aa8b5-2de7-476e-bc85-dbbe5281e8a8\") " pod="openshift-controller-manager/controller-manager-879f6c89f-7l2lz" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.740009 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/37ddbfb0-c042-460d-b772-9cdd214a79a1-node-pullsecrets\") pod 
\"apiserver-76f77b778f-lhbv8\" (UID: \"37ddbfb0-c042-460d-b772-9cdd214a79a1\") " pod="openshift-apiserver/apiserver-76f77b778f-lhbv8" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.740426 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/c3641d9d-5e9b-40a7-90ec-4fa7b3f42a4b-images\") pod \"machine-api-operator-5694c8668f-m4snv\" (UID: \"c3641d9d-5e9b-40a7-90ec-4fa7b3f42a4b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-m4snv" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.740504 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-m4snv"] Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.740957 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7a3aa8b5-2de7-476e-bc85-dbbe5281e8a8-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-7l2lz\" (UID: \"7a3aa8b5-2de7-476e-bc85-dbbe5281e8a8\") " pod="openshift-controller-manager/controller-manager-879f6c89f-7l2lz" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.741600 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/c708f3f9-1c78-43ee-8630-add159a49c49-audit-dir\") pod \"apiserver-7bbb656c7d-pbbcs\" (UID: \"c708f3f9-1c78-43ee-8630-add159a49c49\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pbbcs" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.742565 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.743727 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7a3aa8b5-2de7-476e-bc85-dbbe5281e8a8-config\") pod \"controller-manager-879f6c89f-7l2lz\" (UID: \"7a3aa8b5-2de7-476e-bc85-dbbe5281e8a8\") " pod="openshift-controller-manager/controller-manager-879f6c89f-7l2lz" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.748211 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.748583 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/c708f3f9-1c78-43ee-8630-add159a49c49-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-pbbcs\" (UID: \"c708f3f9-1c78-43ee-8630-add159a49c49\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pbbcs" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.748815 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-f4dfh"] Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.749306 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-f4dfh" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.749676 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-52xfd"] Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.750077 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-52xfd" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.752485 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-8cknx"] Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.753141 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-lswhk"] Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.753382 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-l7sqm"] Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.753661 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-l7sqm" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.753846 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-8cknx" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.753978 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-lswhk" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.757059 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/c3641d9d-5e9b-40a7-90ec-4fa7b3f42a4b-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-m4snv\" (UID: \"c3641d9d-5e9b-40a7-90ec-4fa7b3f42a4b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-m4snv" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.757297 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/c708f3f9-1c78-43ee-8630-add159a49c49-encryption-config\") pod \"apiserver-7bbb656c7d-pbbcs\" (UID: \"c708f3f9-1c78-43ee-8630-add159a49c49\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pbbcs" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.761067 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7a3aa8b5-2de7-476e-bc85-dbbe5281e8a8-serving-cert\") pod \"controller-manager-879f6c89f-7l2lz\" (UID: \"7a3aa8b5-2de7-476e-bc85-dbbe5281e8a8\") " pod="openshift-controller-manager/controller-manager-879f6c89f-7l2lz" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.762819 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-dxdgg"] Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.763322 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-hcztj"] Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.763695 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-nmc8h"] Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.764068 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-nmc8h" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.764077 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-dxdgg" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.764148 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-hcztj" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.764062 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/c708f3f9-1c78-43ee-8630-add159a49c49-etcd-client\") pod \"apiserver-7bbb656c7d-pbbcs\" (UID: \"c708f3f9-1c78-43ee-8630-add159a49c49\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pbbcs" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.764323 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c708f3f9-1c78-43ee-8630-add159a49c49-serving-cert\") pod \"apiserver-7bbb656c7d-pbbcs\" (UID: \"c708f3f9-1c78-43ee-8630-add159a49c49\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pbbcs" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.765893 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.766039 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405460-778cj"] Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.766332 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-lhbv8"] Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.766381 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405460-778cj" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.766965 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-w6d2b"] Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.768016 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-7j5p2"] Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.768617 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-7j5p2" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.768806 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-44p4v"] Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.773332 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-pbbcs"] Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.773419 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-m6kq9"] Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.773675 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-44p4v" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.777924 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-cnw7p"] Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.778197 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-m6kq9" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.778836 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-s5wfn"] Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.779213 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-s5wfn" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.779359 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-cnw7p" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.788276 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.794908 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zfzrm"] Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.799354 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-k9zcz"] Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.799797 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zfzrm" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.800241 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-k9zcz" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.803811 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.821097 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-28cv6"] Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.824303 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-9x5dg"] Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.825485 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.825613 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-9x5dg" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.825804 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-m4cv2"] Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.826502 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/dns-default-m4cv2" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.828305 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-czm5s"] Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.828829 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-j6dnf"] Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.828927 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-czm5s" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.829994 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6vdll"] Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.833883 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-cbtlt"] Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.833916 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-2vsdg"] Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.835424 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-fd2jt"] Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.839832 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-8cknx"] Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.840223 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/889057c8-1eb2-4829-b1d5-a906b88eb68c-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-j6dnf\" (UID: \"889057c8-1eb2-4829-b1d5-a906b88eb68c\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-j6dnf" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.840324 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/ad44a891-fc97-4154-8f93-bbd276c5c18a-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-28cv6\" (UID: \"ad44a891-fc97-4154-8f93-bbd276c5c18a\") " pod="openshift-authentication/oauth-openshift-558db77b4-28cv6" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.840454 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7f08880-f8db-4170-8d1d-1bccb2df10f4-serving-cert\") pod \"openshift-config-operator-7777fb866f-mqbzf\" (UID: \"e7f08880-f8db-4170-8d1d-1bccb2df10f4\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-mqbzf" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.840494 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/05bdba74-09ab-4d1c-9742-c842abf6c9f6-profile-collector-cert\") pod \"catalog-operator-68c6474976-hfvn9\" (UID: \"05bdba74-09ab-4d1c-9742-c842abf6c9f6\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hfvn9" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.840570 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rhqs5\" (UniqueName: 
\"kubernetes.io/projected/abd99a1a-b8bf-44a5-82c4-a44a0c32a7ff-kube-api-access-rhqs5\") pod \"console-operator-58897d9998-tz5lm\" (UID: \"abd99a1a-b8bf-44a5-82c4-a44a0c32a7ff\") " pod="openshift-console-operator/console-operator-58897d9998-tz5lm" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.840651 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jjpmp\" (UniqueName: \"kubernetes.io/projected/05bdba74-09ab-4d1c-9742-c842abf6c9f6-kube-api-access-jjpmp\") pod \"catalog-operator-68c6474976-hfvn9\" (UID: \"05bdba74-09ab-4d1c-9742-c842abf6c9f6\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hfvn9" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.840703 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wwmwf\" (UniqueName: \"kubernetes.io/projected/aa97fc63-7e09-4217-9fb9-78fca4703f04-kube-api-access-wwmwf\") pod \"console-f9d7485db-2vsdg\" (UID: \"aa97fc63-7e09-4217-9fb9-78fca4703f04\") " pod="openshift-console/console-f9d7485db-2vsdg" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.840729 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tg6gz\" (UniqueName: \"kubernetes.io/projected/af02ba5e-e22d-4363-a5e7-6a819c881eeb-kube-api-access-tg6gz\") pod \"openshift-apiserver-operator-796bbdcf4f-w6d2b\" (UID: \"af02ba5e-e22d-4363-a5e7-6a819c881eeb\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-w6d2b" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.840756 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/ad44a891-fc97-4154-8f93-bbd276c5c18a-audit-policies\") pod \"oauth-openshift-558db77b4-28cv6\" (UID: \"ad44a891-fc97-4154-8f93-bbd276c5c18a\") " pod="openshift-authentication/oauth-openshift-558db77b4-28cv6" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.840800 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dldh9\" (UniqueName: \"kubernetes.io/projected/4926f4a2-0ee6-444b-a113-f6ee1d162d72-kube-api-access-dldh9\") pod \"authentication-operator-69f744f599-cbtlt\" (UID: \"4926f4a2-0ee6-444b-a113-f6ee1d162d72\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-cbtlt" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.840831 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ad44a891-fc97-4154-8f93-bbd276c5c18a-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-28cv6\" (UID: \"ad44a891-fc97-4154-8f93-bbd276c5c18a\") " pod="openshift-authentication/oauth-openshift-558db77b4-28cv6" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.840857 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/ad44a891-fc97-4154-8f93-bbd276c5c18a-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-28cv6\" (UID: \"ad44a891-fc97-4154-8f93-bbd276c5c18a\") " pod="openshift-authentication/oauth-openshift-558db77b4-28cv6" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.840888 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rvb8s\" (UniqueName: 
\"kubernetes.io/projected/cdedfd6e-9082-4411-b128-fc9806c67bd3-kube-api-access-rvb8s\") pod \"route-controller-manager-6576b87f9c-p7flx\" (UID: \"cdedfd6e-9082-4411-b128-fc9806c67bd3\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-p7flx" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.840913 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/abd99a1a-b8bf-44a5-82c4-a44a0c32a7ff-serving-cert\") pod \"console-operator-58897d9998-tz5lm\" (UID: \"abd99a1a-b8bf-44a5-82c4-a44a0c32a7ff\") " pod="openshift-console-operator/console-operator-58897d9998-tz5lm" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.840971 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-29zt2\" (UniqueName: \"kubernetes.io/projected/54e50da8-3e06-4de6-88c0-cfe151b794ca-kube-api-access-29zt2\") pod \"packageserver-d55dfcdfc-6vdll\" (UID: \"54e50da8-3e06-4de6-88c0-cfe151b794ca\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6vdll" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.840997 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/54e50da8-3e06-4de6-88c0-cfe151b794ca-tmpfs\") pod \"packageserver-d55dfcdfc-6vdll\" (UID: \"54e50da8-3e06-4de6-88c0-cfe151b794ca\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6vdll" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.841023 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/54e50da8-3e06-4de6-88c0-cfe151b794ca-webhook-cert\") pod \"packageserver-d55dfcdfc-6vdll\" (UID: \"54e50da8-3e06-4de6-88c0-cfe151b794ca\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6vdll" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.841057 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/ad44a891-fc97-4154-8f93-bbd276c5c18a-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-28cv6\" (UID: \"ad44a891-fc97-4154-8f93-bbd276c5c18a\") " pod="openshift-authentication/oauth-openshift-558db77b4-28cv6" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.841081 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/1b9cce3b-92fb-45f2-a81a-4a0a722ed13e-auth-proxy-config\") pod \"machine-approver-56656f9798-ckhbm\" (UID: \"1b9cce3b-92fb-45f2-a81a-4a0a722ed13e\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-ckhbm" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.841111 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/aa97fc63-7e09-4217-9fb9-78fca4703f04-console-oauth-config\") pod \"console-f9d7485db-2vsdg\" (UID: \"aa97fc63-7e09-4217-9fb9-78fca4703f04\") " pod="openshift-console/console-f9d7485db-2vsdg" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.841135 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/af02ba5e-e22d-4363-a5e7-6a819c881eeb-config\") pod 
\"openshift-apiserver-operator-796bbdcf4f-w6d2b\" (UID: \"af02ba5e-e22d-4363-a5e7-6a819c881eeb\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-w6d2b" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.841158 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/54e50da8-3e06-4de6-88c0-cfe151b794ca-apiservice-cert\") pod \"packageserver-d55dfcdfc-6vdll\" (UID: \"54e50da8-3e06-4de6-88c0-cfe151b794ca\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6vdll" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.841182 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/ad44a891-fc97-4154-8f93-bbd276c5c18a-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-28cv6\" (UID: \"ad44a891-fc97-4154-8f93-bbd276c5c18a\") " pod="openshift-authentication/oauth-openshift-558db77b4-28cv6" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.841207 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4926f4a2-0ee6-444b-a113-f6ee1d162d72-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-cbtlt\" (UID: \"4926f4a2-0ee6-444b-a113-f6ee1d162d72\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-cbtlt" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.841230 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1b9cce3b-92fb-45f2-a81a-4a0a722ed13e-config\") pod \"machine-approver-56656f9798-ckhbm\" (UID: \"1b9cce3b-92fb-45f2-a81a-4a0a722ed13e\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-ckhbm" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.841254 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-phrq8\" (UniqueName: \"kubernetes.io/projected/889057c8-1eb2-4829-b1d5-a906b88eb68c-kube-api-access-phrq8\") pod \"cluster-samples-operator-665b6dd947-j6dnf\" (UID: \"889057c8-1eb2-4829-b1d5-a906b88eb68c\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-j6dnf" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.841289 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/aa97fc63-7e09-4217-9fb9-78fca4703f04-trusted-ca-bundle\") pod \"console-f9d7485db-2vsdg\" (UID: \"aa97fc63-7e09-4217-9fb9-78fca4703f04\") " pod="openshift-console/console-f9d7485db-2vsdg" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.841313 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6hvt9\" (UniqueName: \"kubernetes.io/projected/1b9cce3b-92fb-45f2-a81a-4a0a722ed13e-kube-api-access-6hvt9\") pod \"machine-approver-56656f9798-ckhbm\" (UID: \"1b9cce3b-92fb-45f2-a81a-4a0a722ed13e\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-ckhbm" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.841398 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/c948b544-79ae-4148-8483-ef898d1b6663-metrics-tls\") pod 
\"ingress-operator-5b745b69d9-52xfd\" (UID: \"c948b544-79ae-4148-8483-ef898d1b6663\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-52xfd" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.841475 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/ad44a891-fc97-4154-8f93-bbd276c5c18a-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-28cv6\" (UID: \"ad44a891-fc97-4154-8f93-bbd276c5c18a\") " pod="openshift-authentication/oauth-openshift-558db77b4-28cv6" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.841500 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/abd99a1a-b8bf-44a5-82c4-a44a0c32a7ff-trusted-ca\") pod \"console-operator-58897d9998-tz5lm\" (UID: \"abd99a1a-b8bf-44a5-82c4-a44a0c32a7ff\") " pod="openshift-console-operator/console-operator-58897d9998-tz5lm" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.841524 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/45d0a5e3-828c-43e0-a609-ac7ae08d57af-metrics-tls\") pod \"dns-operator-744455d44c-w99dl\" (UID: \"45d0a5e3-828c-43e0-a609-ac7ae08d57af\") " pod="openshift-dns-operator/dns-operator-744455d44c-w99dl" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.841548 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wnbnr\" (UniqueName: \"kubernetes.io/projected/45d0a5e3-828c-43e0-a609-ac7ae08d57af-kube-api-access-wnbnr\") pod \"dns-operator-744455d44c-w99dl\" (UID: \"45d0a5e3-828c-43e0-a609-ac7ae08d57af\") " pod="openshift-dns-operator/dns-operator-744455d44c-w99dl" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.841577 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/ad44a891-fc97-4154-8f93-bbd276c5c18a-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-28cv6\" (UID: \"ad44a891-fc97-4154-8f93-bbd276c5c18a\") " pod="openshift-authentication/oauth-openshift-558db77b4-28cv6" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.841688 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c948b544-79ae-4148-8483-ef898d1b6663-trusted-ca\") pod \"ingress-operator-5b745b69d9-52xfd\" (UID: \"c948b544-79ae-4148-8483-ef898d1b6663\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-52xfd" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.841768 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-dxdgg"] Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.841777 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/ad44a891-fc97-4154-8f93-bbd276c5c18a-audit-dir\") pod \"oauth-openshift-558db77b4-28cv6\" (UID: \"ad44a891-fc97-4154-8f93-bbd276c5c18a\") " pod="openshift-authentication/oauth-openshift-558db77b4-28cv6" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.844159 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: 
\"kubernetes.io/configmap/ad44a891-fc97-4154-8f93-bbd276c5c18a-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-28cv6\" (UID: \"ad44a891-fc97-4154-8f93-bbd276c5c18a\") " pod="openshift-authentication/oauth-openshift-558db77b4-28cv6" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.844310 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4926f4a2-0ee6-444b-a113-f6ee1d162d72-config\") pod \"authentication-operator-69f744f599-cbtlt\" (UID: \"4926f4a2-0ee6-444b-a113-f6ee1d162d72\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-cbtlt" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.844417 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/ad44a891-fc97-4154-8f93-bbd276c5c18a-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-28cv6\" (UID: \"ad44a891-fc97-4154-8f93-bbd276c5c18a\") " pod="openshift-authentication/oauth-openshift-558db77b4-28cv6" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.844541 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/cdedfd6e-9082-4411-b128-fc9806c67bd3-client-ca\") pod \"route-controller-manager-6576b87f9c-p7flx\" (UID: \"cdedfd6e-9082-4411-b128-fc9806c67bd3\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-p7flx" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.844658 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/af02ba5e-e22d-4363-a5e7-6a819c881eeb-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-w6d2b\" (UID: \"af02ba5e-e22d-4363-a5e7-6a819c881eeb\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-w6d2b" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.845267 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/aa97fc63-7e09-4217-9fb9-78fca4703f04-service-ca\") pod \"console-f9d7485db-2vsdg\" (UID: \"aa97fc63-7e09-4217-9fb9-78fca4703f04\") " pod="openshift-console/console-f9d7485db-2vsdg" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.845384 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/e7f08880-f8db-4170-8d1d-1bccb2df10f4-available-featuregates\") pod \"openshift-config-operator-7777fb866f-mqbzf\" (UID: \"e7f08880-f8db-4170-8d1d-1bccb2df10f4\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-mqbzf" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.845492 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4926f4a2-0ee6-444b-a113-f6ee1d162d72-serving-cert\") pod \"authentication-operator-69f744f599-cbtlt\" (UID: \"4926f4a2-0ee6-444b-a113-f6ee1d162d72\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-cbtlt" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.845581 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4926f4a2-0ee6-444b-a113-f6ee1d162d72-service-ca-bundle\") pod 
\"authentication-operator-69f744f599-cbtlt\" (UID: \"4926f4a2-0ee6-444b-a113-f6ee1d162d72\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-cbtlt" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.845679 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/aa97fc63-7e09-4217-9fb9-78fca4703f04-console-serving-cert\") pod \"console-f9d7485db-2vsdg\" (UID: \"aa97fc63-7e09-4217-9fb9-78fca4703f04\") " pod="openshift-console/console-f9d7485db-2vsdg" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.845775 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j2n7j\" (UniqueName: \"kubernetes.io/projected/e7f08880-f8db-4170-8d1d-1bccb2df10f4-kube-api-access-j2n7j\") pod \"openshift-config-operator-7777fb866f-mqbzf\" (UID: \"e7f08880-f8db-4170-8d1d-1bccb2df10f4\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-mqbzf" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.845926 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f2hbs\" (UniqueName: \"kubernetes.io/projected/c948b544-79ae-4148-8483-ef898d1b6663-kube-api-access-f2hbs\") pod \"ingress-operator-5b745b69d9-52xfd\" (UID: \"c948b544-79ae-4148-8483-ef898d1b6663\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-52xfd" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.846072 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/aa97fc63-7e09-4217-9fb9-78fca4703f04-console-config\") pod \"console-f9d7485db-2vsdg\" (UID: \"aa97fc63-7e09-4217-9fb9-78fca4703f04\") " pod="openshift-console/console-f9d7485db-2vsdg" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.846123 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.842041 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/ad44a891-fc97-4154-8f93-bbd276c5c18a-audit-dir\") pod \"oauth-openshift-558db77b4-28cv6\" (UID: \"ad44a891-fc97-4154-8f93-bbd276c5c18a\") " pod="openshift-authentication/oauth-openshift-558db77b4-28cv6" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.847181 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/ad44a891-fc97-4154-8f93-bbd276c5c18a-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-28cv6\" (UID: \"ad44a891-fc97-4154-8f93-bbd276c5c18a\") " pod="openshift-authentication/oauth-openshift-558db77b4-28cv6" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.842854 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/ad44a891-fc97-4154-8f93-bbd276c5c18a-audit-policies\") pod \"oauth-openshift-558db77b4-28cv6\" (UID: \"ad44a891-fc97-4154-8f93-bbd276c5c18a\") " pod="openshift-authentication/oauth-openshift-558db77b4-28cv6" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.842858 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/af02ba5e-e22d-4363-a5e7-6a819c881eeb-config\") pod \"openshift-apiserver-operator-796bbdcf4f-w6d2b\" (UID: 
\"af02ba5e-e22d-4363-a5e7-6a819c881eeb\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-w6d2b" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.847518 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/ad44a891-fc97-4154-8f93-bbd276c5c18a-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-28cv6\" (UID: \"ad44a891-fc97-4154-8f93-bbd276c5c18a\") " pod="openshift-authentication/oauth-openshift-558db77b4-28cv6" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.843038 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/ad44a891-fc97-4154-8f93-bbd276c5c18a-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-28cv6\" (UID: \"ad44a891-fc97-4154-8f93-bbd276c5c18a\") " pod="openshift-authentication/oauth-openshift-558db77b4-28cv6" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.843854 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ad44a891-fc97-4154-8f93-bbd276c5c18a-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-28cv6\" (UID: \"ad44a891-fc97-4154-8f93-bbd276c5c18a\") " pod="openshift-authentication/oauth-openshift-558db77b4-28cv6" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.847745 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/ad44a891-fc97-4154-8f93-bbd276c5c18a-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-28cv6\" (UID: \"ad44a891-fc97-4154-8f93-bbd276c5c18a\") " pod="openshift-authentication/oauth-openshift-558db77b4-28cv6" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.847801 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-w99dl"] Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.845833 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/ad44a891-fc97-4154-8f93-bbd276c5c18a-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-28cv6\" (UID: \"ad44a891-fc97-4154-8f93-bbd276c5c18a\") " pod="openshift-authentication/oauth-openshift-558db77b4-28cv6" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.846169 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cdedfd6e-9082-4411-b128-fc9806c67bd3-config\") pod \"route-controller-manager-6576b87f9c-p7flx\" (UID: \"cdedfd6e-9082-4411-b128-fc9806c67bd3\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-p7flx" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.847998 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cdedfd6e-9082-4411-b128-fc9806c67bd3-serving-cert\") pod \"route-controller-manager-6576b87f9c-p7flx\" (UID: \"cdedfd6e-9082-4411-b128-fc9806c67bd3\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-p7flx" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.848055 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: 
\"kubernetes.io/secret/1b9cce3b-92fb-45f2-a81a-4a0a722ed13e-machine-approver-tls\") pod \"machine-approver-56656f9798-ckhbm\" (UID: \"1b9cce3b-92fb-45f2-a81a-4a0a722ed13e\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-ckhbm" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.848083 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/aa97fc63-7e09-4217-9fb9-78fca4703f04-oauth-serving-cert\") pod \"console-f9d7485db-2vsdg\" (UID: \"aa97fc63-7e09-4217-9fb9-78fca4703f04\") " pod="openshift-console/console-f9d7485db-2vsdg" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.848120 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/abd99a1a-b8bf-44a5-82c4-a44a0c32a7ff-config\") pod \"console-operator-58897d9998-tz5lm\" (UID: \"abd99a1a-b8bf-44a5-82c4-a44a0c32a7ff\") " pod="openshift-console-operator/console-operator-58897d9998-tz5lm" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.848138 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/c948b544-79ae-4148-8483-ef898d1b6663-bound-sa-token\") pod \"ingress-operator-5b745b69d9-52xfd\" (UID: \"c948b544-79ae-4148-8483-ef898d1b6663\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-52xfd" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.848162 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/ad44a891-fc97-4154-8f93-bbd276c5c18a-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-28cv6\" (UID: \"ad44a891-fc97-4154-8f93-bbd276c5c18a\") " pod="openshift-authentication/oauth-openshift-558db77b4-28cv6" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.848198 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7vtcd\" (UniqueName: \"kubernetes.io/projected/ad44a891-fc97-4154-8f93-bbd276c5c18a-kube-api-access-7vtcd\") pod \"oauth-openshift-558db77b4-28cv6\" (UID: \"ad44a891-fc97-4154-8f93-bbd276c5c18a\") " pod="openshift-authentication/oauth-openshift-558db77b4-28cv6" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.848213 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/05bdba74-09ab-4d1c-9742-c842abf6c9f6-srv-cert\") pod \"catalog-operator-68c6474976-hfvn9\" (UID: \"05bdba74-09ab-4d1c-9742-c842abf6c9f6\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hfvn9" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.848231 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/ad44a891-fc97-4154-8f93-bbd276c5c18a-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-28cv6\" (UID: \"ad44a891-fc97-4154-8f93-bbd276c5c18a\") " pod="openshift-authentication/oauth-openshift-558db77b4-28cv6" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.848703 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/e7f08880-f8db-4170-8d1d-1bccb2df10f4-available-featuregates\") pod \"openshift-config-operator-7777fb866f-mqbzf\" (UID: 
\"e7f08880-f8db-4170-8d1d-1bccb2df10f4\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-mqbzf" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.849078 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/abd99a1a-b8bf-44a5-82c4-a44a0c32a7ff-serving-cert\") pod \"console-operator-58897d9998-tz5lm\" (UID: \"abd99a1a-b8bf-44a5-82c4-a44a0c32a7ff\") " pod="openshift-console-operator/console-operator-58897d9998-tz5lm" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.849436 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/ad44a891-fc97-4154-8f93-bbd276c5c18a-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-28cv6\" (UID: \"ad44a891-fc97-4154-8f93-bbd276c5c18a\") " pod="openshift-authentication/oauth-openshift-558db77b4-28cv6" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.849745 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/abd99a1a-b8bf-44a5-82c4-a44a0c32a7ff-config\") pod \"console-operator-58897d9998-tz5lm\" (UID: \"abd99a1a-b8bf-44a5-82c4-a44a0c32a7ff\") " pod="openshift-console-operator/console-operator-58897d9998-tz5lm" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.851134 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/ad44a891-fc97-4154-8f93-bbd276c5c18a-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-28cv6\" (UID: \"ad44a891-fc97-4154-8f93-bbd276c5c18a\") " pod="openshift-authentication/oauth-openshift-558db77b4-28cv6" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.852463 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/ad44a891-fc97-4154-8f93-bbd276c5c18a-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-28cv6\" (UID: \"ad44a891-fc97-4154-8f93-bbd276c5c18a\") " pod="openshift-authentication/oauth-openshift-558db77b4-28cv6" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.852658 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/af02ba5e-e22d-4363-a5e7-6a819c881eeb-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-w6d2b\" (UID: \"af02ba5e-e22d-4363-a5e7-6a819c881eeb\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-w6d2b" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.852981 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/ad44a891-fc97-4154-8f93-bbd276c5c18a-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-28cv6\" (UID: \"ad44a891-fc97-4154-8f93-bbd276c5c18a\") " pod="openshift-authentication/oauth-openshift-558db77b4-28cv6" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.853719 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/ad44a891-fc97-4154-8f93-bbd276c5c18a-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-28cv6\" (UID: \"ad44a891-fc97-4154-8f93-bbd276c5c18a\") " pod="openshift-authentication/oauth-openshift-558db77b4-28cv6" Nov 28 11:10:52 crc 
kubenswrapper[4923]: I1128 11:10:52.854625 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-mqbzf"] Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.857646 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-g855d"] Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.858973 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-7l2lz"] Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.859950 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-hcztj"] Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.861555 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-kwrnm"] Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.862414 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.865342 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-52xfd"] Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.866529 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405460-778cj"] Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.867970 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-f4dfh"] Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.869207 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-tz5lm"] Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.871054 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hfvn9"] Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.872354 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zfzrm"] Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.873505 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-m6kq9"] Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.875082 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-nmc8h"] Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.876494 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-cnw7p"] Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.877893 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-jhjvz"] Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.879088 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-p7flx"] Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.880462 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-9x5dg"] Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 
11:10:52.881904 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-lswhk"] Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.882349 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.883549 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-l7sqm"] Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.884861 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-6t9g2"] Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.886773 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-44p4v"] Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.888451 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-7j5p2"] Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.890219 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-s5wfn"] Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.891495 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-8b2zb"] Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.892248 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-8b2zb" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.893085 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-m4cv2"] Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.897122 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-k9zcz"] Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.897160 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-8b2zb"] Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.902809 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.922052 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.943078 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.948848 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/1b9cce3b-92fb-45f2-a81a-4a0a722ed13e-machine-approver-tls\") pod \"machine-approver-56656f9798-ckhbm\" (UID: \"1b9cce3b-92fb-45f2-a81a-4a0a722ed13e\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-ckhbm" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.948903 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/c948b544-79ae-4148-8483-ef898d1b6663-bound-sa-token\") pod \"ingress-operator-5b745b69d9-52xfd\" (UID: \"c948b544-79ae-4148-8483-ef898d1b6663\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-52xfd" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 
11:10:52.949065 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/05bdba74-09ab-4d1c-9742-c842abf6c9f6-srv-cert\") pod \"catalog-operator-68c6474976-hfvn9\" (UID: \"05bdba74-09ab-4d1c-9742-c842abf6c9f6\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hfvn9" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.949132 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/05bdba74-09ab-4d1c-9742-c842abf6c9f6-profile-collector-cert\") pod \"catalog-operator-68c6474976-hfvn9\" (UID: \"05bdba74-09ab-4d1c-9742-c842abf6c9f6\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hfvn9" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.949178 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jjpmp\" (UniqueName: \"kubernetes.io/projected/05bdba74-09ab-4d1c-9742-c842abf6c9f6-kube-api-access-jjpmp\") pod \"catalog-operator-68c6474976-hfvn9\" (UID: \"05bdba74-09ab-4d1c-9742-c842abf6c9f6\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hfvn9" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.949305 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-29zt2\" (UniqueName: \"kubernetes.io/projected/54e50da8-3e06-4de6-88c0-cfe151b794ca-kube-api-access-29zt2\") pod \"packageserver-d55dfcdfc-6vdll\" (UID: \"54e50da8-3e06-4de6-88c0-cfe151b794ca\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6vdll" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.949352 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/54e50da8-3e06-4de6-88c0-cfe151b794ca-tmpfs\") pod \"packageserver-d55dfcdfc-6vdll\" (UID: \"54e50da8-3e06-4de6-88c0-cfe151b794ca\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6vdll" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.949376 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/54e50da8-3e06-4de6-88c0-cfe151b794ca-webhook-cert\") pod \"packageserver-d55dfcdfc-6vdll\" (UID: \"54e50da8-3e06-4de6-88c0-cfe151b794ca\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6vdll" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.949413 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/1b9cce3b-92fb-45f2-a81a-4a0a722ed13e-auth-proxy-config\") pod \"machine-approver-56656f9798-ckhbm\" (UID: \"1b9cce3b-92fb-45f2-a81a-4a0a722ed13e\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-ckhbm" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.949454 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/54e50da8-3e06-4de6-88c0-cfe151b794ca-apiservice-cert\") pod \"packageserver-d55dfcdfc-6vdll\" (UID: \"54e50da8-3e06-4de6-88c0-cfe151b794ca\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6vdll" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.949492 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/1b9cce3b-92fb-45f2-a81a-4a0a722ed13e-config\") pod \"machine-approver-56656f9798-ckhbm\" (UID: \"1b9cce3b-92fb-45f2-a81a-4a0a722ed13e\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-ckhbm" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.949545 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6hvt9\" (UniqueName: \"kubernetes.io/projected/1b9cce3b-92fb-45f2-a81a-4a0a722ed13e-kube-api-access-6hvt9\") pod \"machine-approver-56656f9798-ckhbm\" (UID: \"1b9cce3b-92fb-45f2-a81a-4a0a722ed13e\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-ckhbm" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.949576 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/c948b544-79ae-4148-8483-ef898d1b6663-metrics-tls\") pod \"ingress-operator-5b745b69d9-52xfd\" (UID: \"c948b544-79ae-4148-8483-ef898d1b6663\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-52xfd" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.949635 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/45d0a5e3-828c-43e0-a609-ac7ae08d57af-metrics-tls\") pod \"dns-operator-744455d44c-w99dl\" (UID: \"45d0a5e3-828c-43e0-a609-ac7ae08d57af\") " pod="openshift-dns-operator/dns-operator-744455d44c-w99dl" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.949661 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wnbnr\" (UniqueName: \"kubernetes.io/projected/45d0a5e3-828c-43e0-a609-ac7ae08d57af-kube-api-access-wnbnr\") pod \"dns-operator-744455d44c-w99dl\" (UID: \"45d0a5e3-828c-43e0-a609-ac7ae08d57af\") " pod="openshift-dns-operator/dns-operator-744455d44c-w99dl" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.949688 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c948b544-79ae-4148-8483-ef898d1b6663-trusted-ca\") pod \"ingress-operator-5b745b69d9-52xfd\" (UID: \"c948b544-79ae-4148-8483-ef898d1b6663\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-52xfd" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.950109 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/54e50da8-3e06-4de6-88c0-cfe151b794ca-tmpfs\") pod \"packageserver-d55dfcdfc-6vdll\" (UID: \"54e50da8-3e06-4de6-88c0-cfe151b794ca\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6vdll" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.950360 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f2hbs\" (UniqueName: \"kubernetes.io/projected/c948b544-79ae-4148-8483-ef898d1b6663-kube-api-access-f2hbs\") pod \"ingress-operator-5b745b69d9-52xfd\" (UID: \"c948b544-79ae-4148-8483-ef898d1b6663\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-52xfd" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.952292 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/45d0a5e3-828c-43e0-a609-ac7ae08d57af-metrics-tls\") pod \"dns-operator-744455d44c-w99dl\" (UID: \"45d0a5e3-828c-43e0-a609-ac7ae08d57af\") " pod="openshift-dns-operator/dns-operator-744455d44c-w99dl" Nov 28 11:10:52 crc 
kubenswrapper[4923]: I1128 11:10:52.962654 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Nov 28 11:10:52 crc kubenswrapper[4923]: I1128 11:10:52.982715 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Nov 28 11:10:53 crc kubenswrapper[4923]: I1128 11:10:53.001822 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Nov 28 11:10:53 crc kubenswrapper[4923]: I1128 11:10:53.022838 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Nov 28 11:10:53 crc kubenswrapper[4923]: I1128 11:10:53.034572 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/05bdba74-09ab-4d1c-9742-c842abf6c9f6-srv-cert\") pod \"catalog-operator-68c6474976-hfvn9\" (UID: \"05bdba74-09ab-4d1c-9742-c842abf6c9f6\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hfvn9" Nov 28 11:10:53 crc kubenswrapper[4923]: I1128 11:10:53.043370 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Nov 28 11:10:53 crc kubenswrapper[4923]: I1128 11:10:53.051918 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/05bdba74-09ab-4d1c-9742-c842abf6c9f6-profile-collector-cert\") pod \"catalog-operator-68c6474976-hfvn9\" (UID: \"05bdba74-09ab-4d1c-9742-c842abf6c9f6\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hfvn9" Nov 28 11:10:53 crc kubenswrapper[4923]: I1128 11:10:53.062234 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Nov 28 11:10:53 crc kubenswrapper[4923]: I1128 11:10:53.103378 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Nov 28 11:10:53 crc kubenswrapper[4923]: I1128 11:10:53.114802 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/54e50da8-3e06-4de6-88c0-cfe151b794ca-webhook-cert\") pod \"packageserver-d55dfcdfc-6vdll\" (UID: \"54e50da8-3e06-4de6-88c0-cfe151b794ca\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6vdll" Nov 28 11:10:53 crc kubenswrapper[4923]: I1128 11:10:53.115685 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/54e50da8-3e06-4de6-88c0-cfe151b794ca-apiservice-cert\") pod \"packageserver-d55dfcdfc-6vdll\" (UID: \"54e50da8-3e06-4de6-88c0-cfe151b794ca\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6vdll" Nov 28 11:10:53 crc kubenswrapper[4923]: I1128 11:10:53.122508 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Nov 28 11:10:53 crc kubenswrapper[4923]: I1128 11:10:53.158896 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6bdxq\" (UniqueName: \"kubernetes.io/projected/c3641d9d-5e9b-40a7-90ec-4fa7b3f42a4b-kube-api-access-6bdxq\") pod \"machine-api-operator-5694c8668f-m4snv\" (UID: \"c3641d9d-5e9b-40a7-90ec-4fa7b3f42a4b\") " 
pod="openshift-machine-api/machine-api-operator-5694c8668f-m4snv" Nov 28 11:10:53 crc kubenswrapper[4923]: I1128 11:10:53.184987 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s4l8c\" (UniqueName: \"kubernetes.io/projected/c708f3f9-1c78-43ee-8630-add159a49c49-kube-api-access-s4l8c\") pod \"apiserver-7bbb656c7d-pbbcs\" (UID: \"c708f3f9-1c78-43ee-8630-add159a49c49\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pbbcs" Nov 28 11:10:53 crc kubenswrapper[4923]: I1128 11:10:53.215169 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-m4snv" Nov 28 11:10:53 crc kubenswrapper[4923]: I1128 11:10:53.223218 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Nov 28 11:10:53 crc kubenswrapper[4923]: I1128 11:10:53.231399 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pbbcs" Nov 28 11:10:53 crc kubenswrapper[4923]: I1128 11:10:53.234072 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bbhbm\" (UniqueName: \"kubernetes.io/projected/7a3aa8b5-2de7-476e-bc85-dbbe5281e8a8-kube-api-access-bbhbm\") pod \"controller-manager-879f6c89f-7l2lz\" (UID: \"7a3aa8b5-2de7-476e-bc85-dbbe5281e8a8\") " pod="openshift-controller-manager/controller-manager-879f6c89f-7l2lz" Nov 28 11:10:53 crc kubenswrapper[4923]: I1128 11:10:53.244015 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Nov 28 11:10:53 crc kubenswrapper[4923]: I1128 11:10:53.263159 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Nov 28 11:10:53 crc kubenswrapper[4923]: I1128 11:10:53.283978 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Nov 28 11:10:53 crc kubenswrapper[4923]: I1128 11:10:53.303311 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Nov 28 11:10:53 crc kubenswrapper[4923]: I1128 11:10:53.316908 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/c948b544-79ae-4148-8483-ef898d1b6663-metrics-tls\") pod \"ingress-operator-5b745b69d9-52xfd\" (UID: \"c948b544-79ae-4148-8483-ef898d1b6663\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-52xfd" Nov 28 11:10:53 crc kubenswrapper[4923]: I1128 11:10:53.337034 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Nov 28 11:10:53 crc kubenswrapper[4923]: I1128 11:10:53.343287 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c948b544-79ae-4148-8483-ef898d1b6663-trusted-ca\") pod \"ingress-operator-5b745b69d9-52xfd\" (UID: \"c948b544-79ae-4148-8483-ef898d1b6663\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-52xfd" Nov 28 11:10:53 crc kubenswrapper[4923]: I1128 11:10:53.344771 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Nov 28 11:10:53 crc kubenswrapper[4923]: I1128 11:10:53.364978 4923 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Nov 28 11:10:53 crc kubenswrapper[4923]: I1128 11:10:53.382829 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Nov 28 11:10:53 crc kubenswrapper[4923]: I1128 11:10:53.404831 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Nov 28 11:10:53 crc kubenswrapper[4923]: I1128 11:10:53.427622 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Nov 28 11:10:53 crc kubenswrapper[4923]: I1128 11:10:53.444012 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-7l2lz" Nov 28 11:10:53 crc kubenswrapper[4923]: I1128 11:10:53.447165 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Nov 28 11:10:53 crc kubenswrapper[4923]: I1128 11:10:53.464126 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Nov 28 11:10:53 crc kubenswrapper[4923]: I1128 11:10:53.473073 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-pbbcs"] Nov 28 11:10:53 crc kubenswrapper[4923]: I1128 11:10:53.484102 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Nov 28 11:10:53 crc kubenswrapper[4923]: W1128 11:10:53.491235 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc708f3f9_1c78_43ee_8630_add159a49c49.slice/crio-e582a2f1beec3a7e34abff9d5040173bdf5abc56e82ef1c7a4e79281a96d3640 WatchSource:0}: Error finding container e582a2f1beec3a7e34abff9d5040173bdf5abc56e82ef1c7a4e79281a96d3640: Status 404 returned error can't find the container with id e582a2f1beec3a7e34abff9d5040173bdf5abc56e82ef1c7a4e79281a96d3640 Nov 28 11:10:53 crc kubenswrapper[4923]: I1128 11:10:53.503643 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-m4snv"] Nov 28 11:10:53 crc kubenswrapper[4923]: I1128 11:10:53.505004 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Nov 28 11:10:53 crc kubenswrapper[4923]: I1128 11:10:53.522207 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Nov 28 11:10:53 crc kubenswrapper[4923]: I1128 11:10:53.546589 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Nov 28 11:10:53 crc kubenswrapper[4923]: I1128 11:10:53.573710 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Nov 28 11:10:53 crc kubenswrapper[4923]: I1128 11:10:53.585826 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Nov 28 11:10:53 crc kubenswrapper[4923]: I1128 11:10:53.602840 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Nov 28 11:10:53 
crc kubenswrapper[4923]: I1128 11:10:53.622298 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Nov 28 11:10:53 crc kubenswrapper[4923]: I1128 11:10:53.644999 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Nov 28 11:10:53 crc kubenswrapper[4923]: I1128 11:10:53.652906 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-7l2lz"] Nov 28 11:10:53 crc kubenswrapper[4923]: I1128 11:10:53.662487 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Nov 28 11:10:53 crc kubenswrapper[4923]: I1128 11:10:53.682574 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Nov 28 11:10:53 crc kubenswrapper[4923]: I1128 11:10:53.702043 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Nov 28 11:10:53 crc kubenswrapper[4923]: I1128 11:10:53.722697 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Nov 28 11:10:53 crc kubenswrapper[4923]: E1128 11:10:53.738754 4923 secret.go:188] Couldn't get secret openshift-apiserver/etcd-client: failed to sync secret cache: timed out waiting for the condition Nov 28 11:10:53 crc kubenswrapper[4923]: E1128 11:10:53.738815 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/37ddbfb0-c042-460d-b772-9cdd214a79a1-etcd-client podName:37ddbfb0-c042-460d-b772-9cdd214a79a1 nodeName:}" failed. No retries permitted until 2025-11-28 11:10:54.238797813 +0000 UTC m=+133.367482023 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "etcd-client" (UniqueName: "kubernetes.io/secret/37ddbfb0-c042-460d-b772-9cdd214a79a1-etcd-client") pod "apiserver-76f77b778f-lhbv8" (UID: "37ddbfb0-c042-460d-b772-9cdd214a79a1") : failed to sync secret cache: timed out waiting for the condition Nov 28 11:10:53 crc kubenswrapper[4923]: E1128 11:10:53.738789 4923 configmap.go:193] Couldn't get configMap openshift-apiserver/audit-1: failed to sync configmap cache: timed out waiting for the condition Nov 28 11:10:53 crc kubenswrapper[4923]: E1128 11:10:53.738907 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/37ddbfb0-c042-460d-b772-9cdd214a79a1-audit podName:37ddbfb0-c042-460d-b772-9cdd214a79a1 nodeName:}" failed. No retries permitted until 2025-11-28 11:10:54.238886215 +0000 UTC m=+133.367570465 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "audit" (UniqueName: "kubernetes.io/configmap/37ddbfb0-c042-460d-b772-9cdd214a79a1-audit") pod "apiserver-76f77b778f-lhbv8" (UID: "37ddbfb0-c042-460d-b772-9cdd214a79a1") : failed to sync configmap cache: timed out waiting for the condition Nov 28 11:10:53 crc kubenswrapper[4923]: E1128 11:10:53.738947 4923 configmap.go:193] Couldn't get configMap openshift-apiserver/config: failed to sync configmap cache: timed out waiting for the condition Nov 28 11:10:53 crc kubenswrapper[4923]: E1128 11:10:53.739014 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/37ddbfb0-c042-460d-b772-9cdd214a79a1-config podName:37ddbfb0-c042-460d-b772-9cdd214a79a1 nodeName:}" failed. No retries permitted until 2025-11-28 11:10:54.238997568 +0000 UTC m=+133.367681778 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/37ddbfb0-c042-460d-b772-9cdd214a79a1-config") pod "apiserver-76f77b778f-lhbv8" (UID: "37ddbfb0-c042-460d-b772-9cdd214a79a1") : failed to sync configmap cache: timed out waiting for the condition Nov 28 11:10:53 crc kubenswrapper[4923]: E1128 11:10:53.739043 4923 configmap.go:193] Couldn't get configMap openshift-apiserver/trusted-ca-bundle: failed to sync configmap cache: timed out waiting for the condition Nov 28 11:10:53 crc kubenswrapper[4923]: E1128 11:10:53.739090 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/37ddbfb0-c042-460d-b772-9cdd214a79a1-trusted-ca-bundle podName:37ddbfb0-c042-460d-b772-9cdd214a79a1 nodeName:}" failed. No retries permitted until 2025-11-28 11:10:54.239076061 +0000 UTC m=+133.367760301 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/37ddbfb0-c042-460d-b772-9cdd214a79a1-trusted-ca-bundle") pod "apiserver-76f77b778f-lhbv8" (UID: "37ddbfb0-c042-460d-b772-9cdd214a79a1") : failed to sync configmap cache: timed out waiting for the condition Nov 28 11:10:53 crc kubenswrapper[4923]: E1128 11:10:53.739858 4923 configmap.go:193] Couldn't get configMap openshift-apiserver/etcd-serving-ca: failed to sync configmap cache: timed out waiting for the condition Nov 28 11:10:53 crc kubenswrapper[4923]: E1128 11:10:53.739895 4923 secret.go:188] Couldn't get secret openshift-apiserver/serving-cert: failed to sync secret cache: timed out waiting for the condition Nov 28 11:10:53 crc kubenswrapper[4923]: E1128 11:10:53.739907 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/37ddbfb0-c042-460d-b772-9cdd214a79a1-etcd-serving-ca podName:37ddbfb0-c042-460d-b772-9cdd214a79a1 nodeName:}" failed. No retries permitted until 2025-11-28 11:10:54.239891024 +0000 UTC m=+133.368575264 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "etcd-serving-ca" (UniqueName: "kubernetes.io/configmap/37ddbfb0-c042-460d-b772-9cdd214a79a1-etcd-serving-ca") pod "apiserver-76f77b778f-lhbv8" (UID: "37ddbfb0-c042-460d-b772-9cdd214a79a1") : failed to sync configmap cache: timed out waiting for the condition Nov 28 11:10:53 crc kubenswrapper[4923]: E1128 11:10:53.739902 4923 configmap.go:193] Couldn't get configMap openshift-apiserver/image-import-ca: failed to sync configmap cache: timed out waiting for the condition Nov 28 11:10:53 crc kubenswrapper[4923]: E1128 11:10:53.739986 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/37ddbfb0-c042-460d-b772-9cdd214a79a1-serving-cert podName:37ddbfb0-c042-460d-b772-9cdd214a79a1 nodeName:}" failed. No retries permitted until 2025-11-28 11:10:54.239975406 +0000 UTC m=+133.368659616 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/37ddbfb0-c042-460d-b772-9cdd214a79a1-serving-cert") pod "apiserver-76f77b778f-lhbv8" (UID: "37ddbfb0-c042-460d-b772-9cdd214a79a1") : failed to sync secret cache: timed out waiting for the condition Nov 28 11:10:53 crc kubenswrapper[4923]: E1128 11:10:53.740020 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/37ddbfb0-c042-460d-b772-9cdd214a79a1-image-import-ca podName:37ddbfb0-c042-460d-b772-9cdd214a79a1 nodeName:}" failed. No retries permitted until 2025-11-28 11:10:54.240001607 +0000 UTC m=+133.368685817 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "image-import-ca" (UniqueName: "kubernetes.io/configmap/37ddbfb0-c042-460d-b772-9cdd214a79a1-image-import-ca") pod "apiserver-76f77b778f-lhbv8" (UID: "37ddbfb0-c042-460d-b772-9cdd214a79a1") : failed to sync configmap cache: timed out waiting for the condition Nov 28 11:10:53 crc kubenswrapper[4923]: E1128 11:10:53.742082 4923 secret.go:188] Couldn't get secret openshift-apiserver/encryption-config-1: failed to sync secret cache: timed out waiting for the condition Nov 28 11:10:53 crc kubenswrapper[4923]: E1128 11:10:53.742121 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/37ddbfb0-c042-460d-b772-9cdd214a79a1-encryption-config podName:37ddbfb0-c042-460d-b772-9cdd214a79a1 nodeName:}" failed. No retries permitted until 2025-11-28 11:10:54.242111946 +0000 UTC m=+133.370796156 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "encryption-config" (UniqueName: "kubernetes.io/secret/37ddbfb0-c042-460d-b772-9cdd214a79a1-encryption-config") pod "apiserver-76f77b778f-lhbv8" (UID: "37ddbfb0-c042-460d-b772-9cdd214a79a1") : failed to sync secret cache: timed out waiting for the condition Nov 28 11:10:53 crc kubenswrapper[4923]: I1128 11:10:53.743557 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Nov 28 11:10:53 crc kubenswrapper[4923]: I1128 11:10:53.762709 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Nov 28 11:10:53 crc kubenswrapper[4923]: I1128 11:10:53.781710 4923 request.go:700] Waited for 1.017224952s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-scheduler-operator/configmaps?fieldSelector=metadata.name%3Dopenshift-kube-scheduler-operator-config&limit=500&resourceVersion=0 Nov 28 11:10:53 crc kubenswrapper[4923]: I1128 11:10:53.783416 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Nov 28 11:10:53 crc kubenswrapper[4923]: I1128 11:10:53.804774 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Nov 28 11:10:53 crc kubenswrapper[4923]: E1128 11:10:53.842183 4923 secret.go:188] Couldn't get secret openshift-console/console-oauth-config: failed to sync secret cache: timed out waiting for the condition Nov 28 11:10:53 crc kubenswrapper[4923]: E1128 11:10:53.842224 4923 secret.go:188] Couldn't get secret openshift-config-operator/config-operator-serving-cert: failed to sync secret cache: timed out waiting for the condition Nov 28 11:10:53 crc kubenswrapper[4923]: E1128 11:10:53.842179 4923 secret.go:188] Couldn't get secret openshift-cluster-samples-operator/samples-operator-tls: failed to sync secret cache: timed out waiting for the condition Nov 28 11:10:53 crc kubenswrapper[4923]: E1128 11:10:53.842324 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/aa97fc63-7e09-4217-9fb9-78fca4703f04-console-oauth-config podName:aa97fc63-7e09-4217-9fb9-78fca4703f04 nodeName:}" failed. No retries permitted until 2025-11-28 11:10:54.342252428 +0000 UTC m=+133.470936678 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "console-oauth-config" (UniqueName: "kubernetes.io/secret/aa97fc63-7e09-4217-9fb9-78fca4703f04-console-oauth-config") pod "console-f9d7485db-2vsdg" (UID: "aa97fc63-7e09-4217-9fb9-78fca4703f04") : failed to sync secret cache: timed out waiting for the condition Nov 28 11:10:53 crc kubenswrapper[4923]: E1128 11:10:53.842358 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e7f08880-f8db-4170-8d1d-1bccb2df10f4-serving-cert podName:e7f08880-f8db-4170-8d1d-1bccb2df10f4 nodeName:}" failed. No retries permitted until 2025-11-28 11:10:54.34234345 +0000 UTC m=+133.471027700 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/e7f08880-f8db-4170-8d1d-1bccb2df10f4-serving-cert") pod "openshift-config-operator-7777fb866f-mqbzf" (UID: "e7f08880-f8db-4170-8d1d-1bccb2df10f4") : failed to sync secret cache: timed out waiting for the condition Nov 28 11:10:53 crc kubenswrapper[4923]: E1128 11:10:53.842422 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/889057c8-1eb2-4829-b1d5-a906b88eb68c-samples-operator-tls podName:889057c8-1eb2-4829-b1d5-a906b88eb68c nodeName:}" failed. No retries permitted until 2025-11-28 11:10:54.342371311 +0000 UTC m=+133.471055551 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "samples-operator-tls" (UniqueName: "kubernetes.io/secret/889057c8-1eb2-4829-b1d5-a906b88eb68c-samples-operator-tls") pod "cluster-samples-operator-665b6dd947-j6dnf" (UID: "889057c8-1eb2-4829-b1d5-a906b88eb68c") : failed to sync secret cache: timed out waiting for the condition Nov 28 11:10:53 crc kubenswrapper[4923]: E1128 11:10:53.843662 4923 configmap.go:193] Couldn't get configMap openshift-console-operator/trusted-ca: failed to sync configmap cache: timed out waiting for the condition Nov 28 11:10:53 crc kubenswrapper[4923]: E1128 11:10:53.843755 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/abd99a1a-b8bf-44a5-82c4-a44a0c32a7ff-trusted-ca podName:abd99a1a-b8bf-44a5-82c4-a44a0c32a7ff nodeName:}" failed. No retries permitted until 2025-11-28 11:10:54.3437253 +0000 UTC m=+133.472409550 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "trusted-ca" (UniqueName: "kubernetes.io/configmap/abd99a1a-b8bf-44a5-82c4-a44a0c32a7ff-trusted-ca") pod "console-operator-58897d9998-tz5lm" (UID: "abd99a1a-b8bf-44a5-82c4-a44a0c32a7ff") : failed to sync configmap cache: timed out waiting for the condition Nov 28 11:10:53 crc kubenswrapper[4923]: I1128 11:10:53.843830 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 28 11:10:53 crc kubenswrapper[4923]: E1128 11:10:53.847269 4923 configmap.go:193] Couldn't get configMap openshift-route-controller-manager/config: failed to sync configmap cache: timed out waiting for the condition Nov 28 11:10:53 crc kubenswrapper[4923]: E1128 11:10:53.847338 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/cdedfd6e-9082-4411-b128-fc9806c67bd3-config podName:cdedfd6e-9082-4411-b128-fc9806c67bd3 nodeName:}" failed. No retries permitted until 2025-11-28 11:10:54.347320671 +0000 UTC m=+133.476004921 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/cdedfd6e-9082-4411-b128-fc9806c67bd3-config") pod "route-controller-manager-6576b87f9c-p7flx" (UID: "cdedfd6e-9082-4411-b128-fc9806c67bd3") : failed to sync configmap cache: timed out waiting for the condition Nov 28 11:10:53 crc kubenswrapper[4923]: E1128 11:10:53.847395 4923 configmap.go:193] Couldn't get configMap openshift-authentication-operator/trusted-ca-bundle: failed to sync configmap cache: timed out waiting for the condition Nov 28 11:10:53 crc kubenswrapper[4923]: E1128 11:10:53.847438 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/4926f4a2-0ee6-444b-a113-f6ee1d162d72-trusted-ca-bundle podName:4926f4a2-0ee6-444b-a113-f6ee1d162d72 nodeName:}" failed. 
No retries permitted until 2025-11-28 11:10:54.347424574 +0000 UTC m=+133.476108814 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/4926f4a2-0ee6-444b-a113-f6ee1d162d72-trusted-ca-bundle") pod "authentication-operator-69f744f599-cbtlt" (UID: "4926f4a2-0ee6-444b-a113-f6ee1d162d72") : failed to sync configmap cache: timed out waiting for the condition Nov 28 11:10:53 crc kubenswrapper[4923]: E1128 11:10:53.847539 4923 configmap.go:193] Couldn't get configMap openshift-authentication-operator/authentication-operator-config: failed to sync configmap cache: timed out waiting for the condition Nov 28 11:10:53 crc kubenswrapper[4923]: E1128 11:10:53.847578 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/4926f4a2-0ee6-444b-a113-f6ee1d162d72-config podName:4926f4a2-0ee6-444b-a113-f6ee1d162d72 nodeName:}" failed. No retries permitted until 2025-11-28 11:10:54.347566128 +0000 UTC m=+133.476250378 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/4926f4a2-0ee6-444b-a113-f6ee1d162d72-config") pod "authentication-operator-69f744f599-cbtlt" (UID: "4926f4a2-0ee6-444b-a113-f6ee1d162d72") : failed to sync configmap cache: timed out waiting for the condition Nov 28 11:10:53 crc kubenswrapper[4923]: E1128 11:10:53.847782 4923 configmap.go:193] Couldn't get configMap openshift-route-controller-manager/client-ca: failed to sync configmap cache: timed out waiting for the condition Nov 28 11:10:53 crc kubenswrapper[4923]: E1128 11:10:53.847842 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/cdedfd6e-9082-4411-b128-fc9806c67bd3-client-ca podName:cdedfd6e-9082-4411-b128-fc9806c67bd3 nodeName:}" failed. No retries permitted until 2025-11-28 11:10:54.347829536 +0000 UTC m=+133.476513786 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "client-ca" (UniqueName: "kubernetes.io/configmap/cdedfd6e-9082-4411-b128-fc9806c67bd3-client-ca") pod "route-controller-manager-6576b87f9c-p7flx" (UID: "cdedfd6e-9082-4411-b128-fc9806c67bd3") : failed to sync configmap cache: timed out waiting for the condition Nov 28 11:10:53 crc kubenswrapper[4923]: E1128 11:10:53.847881 4923 configmap.go:193] Couldn't get configMap openshift-authentication-operator/service-ca-bundle: failed to sync configmap cache: timed out waiting for the condition Nov 28 11:10:53 crc kubenswrapper[4923]: E1128 11:10:53.847889 4923 configmap.go:193] Couldn't get configMap openshift-console/trusted-ca-bundle: failed to sync configmap cache: timed out waiting for the condition Nov 28 11:10:53 crc kubenswrapper[4923]: E1128 11:10:53.847916 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/4926f4a2-0ee6-444b-a113-f6ee1d162d72-service-ca-bundle podName:4926f4a2-0ee6-444b-a113-f6ee1d162d72 nodeName:}" failed. No retries permitted until 2025-11-28 11:10:54.347905948 +0000 UTC m=+133.476590198 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "service-ca-bundle" (UniqueName: "kubernetes.io/configmap/4926f4a2-0ee6-444b-a113-f6ee1d162d72-service-ca-bundle") pod "authentication-operator-69f744f599-cbtlt" (UID: "4926f4a2-0ee6-444b-a113-f6ee1d162d72") : failed to sync configmap cache: timed out waiting for the condition Nov 28 11:10:53 crc kubenswrapper[4923]: E1128 11:10:53.847974 4923 secret.go:188] Couldn't get secret openshift-console/console-serving-cert: failed to sync secret cache: timed out waiting for the condition Nov 28 11:10:53 crc kubenswrapper[4923]: E1128 11:10:53.848023 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/aa97fc63-7e09-4217-9fb9-78fca4703f04-trusted-ca-bundle podName:aa97fc63-7e09-4217-9fb9-78fca4703f04 nodeName:}" failed. No retries permitted until 2025-11-28 11:10:54.34798824 +0000 UTC m=+133.476672560 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/aa97fc63-7e09-4217-9fb9-78fca4703f04-trusted-ca-bundle") pod "console-f9d7485db-2vsdg" (UID: "aa97fc63-7e09-4217-9fb9-78fca4703f04") : failed to sync configmap cache: timed out waiting for the condition Nov 28 11:10:53 crc kubenswrapper[4923]: E1128 11:10:53.848070 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/aa97fc63-7e09-4217-9fb9-78fca4703f04-console-serving-cert podName:aa97fc63-7e09-4217-9fb9-78fca4703f04 nodeName:}" failed. No retries permitted until 2025-11-28 11:10:54.348049832 +0000 UTC m=+133.476734182 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "console-serving-cert" (UniqueName: "kubernetes.io/secret/aa97fc63-7e09-4217-9fb9-78fca4703f04-console-serving-cert") pod "console-f9d7485db-2vsdg" (UID: "aa97fc63-7e09-4217-9fb9-78fca4703f04") : failed to sync secret cache: timed out waiting for the condition Nov 28 11:10:53 crc kubenswrapper[4923]: E1128 11:10:53.849330 4923 configmap.go:193] Couldn't get configMap openshift-console/console-config: failed to sync configmap cache: timed out waiting for the condition Nov 28 11:10:53 crc kubenswrapper[4923]: E1128 11:10:53.849399 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/aa97fc63-7e09-4217-9fb9-78fca4703f04-console-config podName:aa97fc63-7e09-4217-9fb9-78fca4703f04 nodeName:}" failed. No retries permitted until 2025-11-28 11:10:54.349378859 +0000 UTC m=+133.478063209 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "console-config" (UniqueName: "kubernetes.io/configmap/aa97fc63-7e09-4217-9fb9-78fca4703f04-console-config") pod "console-f9d7485db-2vsdg" (UID: "aa97fc63-7e09-4217-9fb9-78fca4703f04") : failed to sync configmap cache: timed out waiting for the condition Nov 28 11:10:53 crc kubenswrapper[4923]: E1128 11:10:53.849430 4923 secret.go:188] Couldn't get secret openshift-route-controller-manager/serving-cert: failed to sync secret cache: timed out waiting for the condition Nov 28 11:10:53 crc kubenswrapper[4923]: E1128 11:10:53.849473 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/cdedfd6e-9082-4411-b128-fc9806c67bd3-serving-cert podName:cdedfd6e-9082-4411-b128-fc9806c67bd3 nodeName:}" failed. No retries permitted until 2025-11-28 11:10:54.349456962 +0000 UTC m=+133.478141332 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/cdedfd6e-9082-4411-b128-fc9806c67bd3-serving-cert") pod "route-controller-manager-6576b87f9c-p7flx" (UID: "cdedfd6e-9082-4411-b128-fc9806c67bd3") : failed to sync secret cache: timed out waiting for the condition Nov 28 11:10:53 crc kubenswrapper[4923]: E1128 11:10:53.849964 4923 configmap.go:193] Couldn't get configMap openshift-console/service-ca: failed to sync configmap cache: timed out waiting for the condition Nov 28 11:10:53 crc kubenswrapper[4923]: E1128 11:10:53.850017 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/aa97fc63-7e09-4217-9fb9-78fca4703f04-service-ca podName:aa97fc63-7e09-4217-9fb9-78fca4703f04 nodeName:}" failed. No retries permitted until 2025-11-28 11:10:54.350001097 +0000 UTC m=+133.478685347 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "service-ca" (UniqueName: "kubernetes.io/configmap/aa97fc63-7e09-4217-9fb9-78fca4703f04-service-ca") pod "console-f9d7485db-2vsdg" (UID: "aa97fc63-7e09-4217-9fb9-78fca4703f04") : failed to sync configmap cache: timed out waiting for the condition Nov 28 11:10:53 crc kubenswrapper[4923]: E1128 11:10:53.850045 4923 configmap.go:193] Couldn't get configMap openshift-console/oauth-serving-cert: failed to sync configmap cache: timed out waiting for the condition Nov 28 11:10:53 crc kubenswrapper[4923]: E1128 11:10:53.850113 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/aa97fc63-7e09-4217-9fb9-78fca4703f04-oauth-serving-cert podName:aa97fc63-7e09-4217-9fb9-78fca4703f04 nodeName:}" failed. No retries permitted until 2025-11-28 11:10:54.35009642 +0000 UTC m=+133.478780670 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "oauth-serving-cert" (UniqueName: "kubernetes.io/configmap/aa97fc63-7e09-4217-9fb9-78fca4703f04-oauth-serving-cert") pod "console-f9d7485db-2vsdg" (UID: "aa97fc63-7e09-4217-9fb9-78fca4703f04") : failed to sync configmap cache: timed out waiting for the condition Nov 28 11:10:53 crc kubenswrapper[4923]: E1128 11:10:53.850053 4923 secret.go:188] Couldn't get secret openshift-authentication-operator/serving-cert: failed to sync secret cache: timed out waiting for the condition Nov 28 11:10:53 crc kubenswrapper[4923]: E1128 11:10:53.850176 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4926f4a2-0ee6-444b-a113-f6ee1d162d72-serving-cert podName:4926f4a2-0ee6-444b-a113-f6ee1d162d72 nodeName:}" failed. No retries permitted until 2025-11-28 11:10:54.350161952 +0000 UTC m=+133.478846202 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/4926f4a2-0ee6-444b-a113-f6ee1d162d72-serving-cert") pod "authentication-operator-69f744f599-cbtlt" (UID: "4926f4a2-0ee6-444b-a113-f6ee1d162d72") : failed to sync secret cache: timed out waiting for the condition Nov 28 11:10:53 crc kubenswrapper[4923]: I1128 11:10:53.863203 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 28 11:10:53 crc kubenswrapper[4923]: I1128 11:10:53.882919 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Nov 28 11:10:53 crc kubenswrapper[4923]: I1128 11:10:53.903664 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Nov 28 11:10:53 crc kubenswrapper[4923]: I1128 11:10:53.922708 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Nov 28 11:10:53 crc kubenswrapper[4923]: I1128 11:10:53.943813 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Nov 28 11:10:53 crc kubenswrapper[4923]: E1128 11:10:53.949133 4923 secret.go:188] Couldn't get secret openshift-cluster-machine-approver/machine-approver-tls: failed to sync secret cache: timed out waiting for the condition Nov 28 11:10:53 crc kubenswrapper[4923]: E1128 11:10:53.949281 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/1b9cce3b-92fb-45f2-a81a-4a0a722ed13e-machine-approver-tls podName:1b9cce3b-92fb-45f2-a81a-4a0a722ed13e nodeName:}" failed. No retries permitted until 2025-11-28 11:10:54.449242863 +0000 UTC m=+133.577927133 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "machine-approver-tls" (UniqueName: "kubernetes.io/secret/1b9cce3b-92fb-45f2-a81a-4a0a722ed13e-machine-approver-tls") pod "machine-approver-56656f9798-ckhbm" (UID: "1b9cce3b-92fb-45f2-a81a-4a0a722ed13e") : failed to sync secret cache: timed out waiting for the condition Nov 28 11:10:53 crc kubenswrapper[4923]: E1128 11:10:53.950227 4923 configmap.go:193] Couldn't get configMap openshift-cluster-machine-approver/machine-approver-config: failed to sync configmap cache: timed out waiting for the condition Nov 28 11:10:53 crc kubenswrapper[4923]: E1128 11:10:53.950311 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/1b9cce3b-92fb-45f2-a81a-4a0a722ed13e-config podName:1b9cce3b-92fb-45f2-a81a-4a0a722ed13e nodeName:}" failed. No retries permitted until 2025-11-28 11:10:54.450284023 +0000 UTC m=+133.578968323 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/1b9cce3b-92fb-45f2-a81a-4a0a722ed13e-config") pod "machine-approver-56656f9798-ckhbm" (UID: "1b9cce3b-92fb-45f2-a81a-4a0a722ed13e") : failed to sync configmap cache: timed out waiting for the condition Nov 28 11:10:53 crc kubenswrapper[4923]: E1128 11:10:53.950380 4923 configmap.go:193] Couldn't get configMap openshift-cluster-machine-approver/kube-rbac-proxy: failed to sync configmap cache: timed out waiting for the condition Nov 28 11:10:53 crc kubenswrapper[4923]: E1128 11:10:53.950441 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/1b9cce3b-92fb-45f2-a81a-4a0a722ed13e-auth-proxy-config podName:1b9cce3b-92fb-45f2-a81a-4a0a722ed13e nodeName:}" failed. No retries permitted until 2025-11-28 11:10:54.450418226 +0000 UTC m=+133.579102566 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "auth-proxy-config" (UniqueName: "kubernetes.io/configmap/1b9cce3b-92fb-45f2-a81a-4a0a722ed13e-auth-proxy-config") pod "machine-approver-56656f9798-ckhbm" (UID: "1b9cce3b-92fb-45f2-a81a-4a0a722ed13e") : failed to sync configmap cache: timed out waiting for the condition Nov 28 11:10:53 crc kubenswrapper[4923]: I1128 11:10:53.964575 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Nov 28 11:10:53 crc kubenswrapper[4923]: I1128 11:10:53.985468 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Nov 28 11:10:54 crc kubenswrapper[4923]: I1128 11:10:54.003579 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Nov 28 11:10:54 crc kubenswrapper[4923]: I1128 11:10:54.023635 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Nov 28 11:10:54 crc kubenswrapper[4923]: I1128 11:10:54.045380 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Nov 28 11:10:54 crc kubenswrapper[4923]: I1128 11:10:54.066358 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Nov 28 11:10:54 crc kubenswrapper[4923]: I1128 11:10:54.083085 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Nov 28 11:10:54 crc kubenswrapper[4923]: I1128 11:10:54.105071 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Nov 28 11:10:54 crc kubenswrapper[4923]: I1128 11:10:54.122754 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Nov 28 11:10:54 crc kubenswrapper[4923]: I1128 11:10:54.142855 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Nov 28 11:10:54 crc kubenswrapper[4923]: I1128 11:10:54.164496 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Nov 28 11:10:54 crc kubenswrapper[4923]: I1128 11:10:54.182643 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Nov 28 11:10:54 crc kubenswrapper[4923]: I1128 11:10:54.202928 4923 reflector.go:368] Caches 
populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Nov 28 11:10:54 crc kubenswrapper[4923]: E1128 11:10:54.207988 4923 projected.go:288] Couldn't get configMap openshift-apiserver/kube-root-ca.crt: failed to sync configmap cache: timed out waiting for the condition Nov 28 11:10:54 crc kubenswrapper[4923]: I1128 11:10:54.223072 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Nov 28 11:10:54 crc kubenswrapper[4923]: I1128 11:10:54.242978 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Nov 28 11:10:54 crc kubenswrapper[4923]: I1128 11:10:54.263138 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Nov 28 11:10:54 crc kubenswrapper[4923]: I1128 11:10:54.278960 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/37ddbfb0-c042-460d-b772-9cdd214a79a1-etcd-client\") pod \"apiserver-76f77b778f-lhbv8\" (UID: \"37ddbfb0-c042-460d-b772-9cdd214a79a1\") " pod="openshift-apiserver/apiserver-76f77b778f-lhbv8" Nov 28 11:10:54 crc kubenswrapper[4923]: I1128 11:10:54.279556 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/37ddbfb0-c042-460d-b772-9cdd214a79a1-serving-cert\") pod \"apiserver-76f77b778f-lhbv8\" (UID: \"37ddbfb0-c042-460d-b772-9cdd214a79a1\") " pod="openshift-apiserver/apiserver-76f77b778f-lhbv8" Nov 28 11:10:54 crc kubenswrapper[4923]: I1128 11:10:54.280005 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/37ddbfb0-c042-460d-b772-9cdd214a79a1-image-import-ca\") pod \"apiserver-76f77b778f-lhbv8\" (UID: \"37ddbfb0-c042-460d-b772-9cdd214a79a1\") " pod="openshift-apiserver/apiserver-76f77b778f-lhbv8" Nov 28 11:10:54 crc kubenswrapper[4923]: I1128 11:10:54.280269 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/37ddbfb0-c042-460d-b772-9cdd214a79a1-config\") pod \"apiserver-76f77b778f-lhbv8\" (UID: \"37ddbfb0-c042-460d-b772-9cdd214a79a1\") " pod="openshift-apiserver/apiserver-76f77b778f-lhbv8" Nov 28 11:10:54 crc kubenswrapper[4923]: I1128 11:10:54.280750 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/37ddbfb0-c042-460d-b772-9cdd214a79a1-trusted-ca-bundle\") pod \"apiserver-76f77b778f-lhbv8\" (UID: \"37ddbfb0-c042-460d-b772-9cdd214a79a1\") " pod="openshift-apiserver/apiserver-76f77b778f-lhbv8" Nov 28 11:10:54 crc kubenswrapper[4923]: I1128 11:10:54.281128 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/37ddbfb0-c042-460d-b772-9cdd214a79a1-encryption-config\") pod \"apiserver-76f77b778f-lhbv8\" (UID: \"37ddbfb0-c042-460d-b772-9cdd214a79a1\") " pod="openshift-apiserver/apiserver-76f77b778f-lhbv8" Nov 28 11:10:54 crc kubenswrapper[4923]: I1128 11:10:54.281387 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/37ddbfb0-c042-460d-b772-9cdd214a79a1-etcd-serving-ca\") pod \"apiserver-76f77b778f-lhbv8\" (UID: 
\"37ddbfb0-c042-460d-b772-9cdd214a79a1\") " pod="openshift-apiserver/apiserver-76f77b778f-lhbv8" Nov 28 11:10:54 crc kubenswrapper[4923]: I1128 11:10:54.281725 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/37ddbfb0-c042-460d-b772-9cdd214a79a1-audit\") pod \"apiserver-76f77b778f-lhbv8\" (UID: \"37ddbfb0-c042-460d-b772-9cdd214a79a1\") " pod="openshift-apiserver/apiserver-76f77b778f-lhbv8" Nov 28 11:10:54 crc kubenswrapper[4923]: I1128 11:10:54.284238 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Nov 28 11:10:54 crc kubenswrapper[4923]: I1128 11:10:54.303628 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Nov 28 11:10:54 crc kubenswrapper[4923]: I1128 11:10:54.324091 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Nov 28 11:10:54 crc kubenswrapper[4923]: I1128 11:10:54.343965 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Nov 28 11:10:54 crc kubenswrapper[4923]: I1128 11:10:54.364076 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Nov 28 11:10:54 crc kubenswrapper[4923]: I1128 11:10:54.382551 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4926f4a2-0ee6-444b-a113-f6ee1d162d72-config\") pod \"authentication-operator-69f744f599-cbtlt\" (UID: \"4926f4a2-0ee6-444b-a113-f6ee1d162d72\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-cbtlt" Nov 28 11:10:54 crc kubenswrapper[4923]: I1128 11:10:54.382605 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/cdedfd6e-9082-4411-b128-fc9806c67bd3-client-ca\") pod \"route-controller-manager-6576b87f9c-p7flx\" (UID: \"cdedfd6e-9082-4411-b128-fc9806c67bd3\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-p7flx" Nov 28 11:10:54 crc kubenswrapper[4923]: I1128 11:10:54.382633 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/aa97fc63-7e09-4217-9fb9-78fca4703f04-console-serving-cert\") pod \"console-f9d7485db-2vsdg\" (UID: \"aa97fc63-7e09-4217-9fb9-78fca4703f04\") " pod="openshift-console/console-f9d7485db-2vsdg" Nov 28 11:10:54 crc kubenswrapper[4923]: I1128 11:10:54.382656 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/aa97fc63-7e09-4217-9fb9-78fca4703f04-service-ca\") pod \"console-f9d7485db-2vsdg\" (UID: \"aa97fc63-7e09-4217-9fb9-78fca4703f04\") " pod="openshift-console/console-f9d7485db-2vsdg" Nov 28 11:10:54 crc kubenswrapper[4923]: I1128 11:10:54.382689 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4926f4a2-0ee6-444b-a113-f6ee1d162d72-serving-cert\") pod \"authentication-operator-69f744f599-cbtlt\" (UID: \"4926f4a2-0ee6-444b-a113-f6ee1d162d72\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-cbtlt" Nov 28 11:10:54 
crc kubenswrapper[4923]: I1128 11:10:54.382711 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4926f4a2-0ee6-444b-a113-f6ee1d162d72-service-ca-bundle\") pod \"authentication-operator-69f744f599-cbtlt\" (UID: \"4926f4a2-0ee6-444b-a113-f6ee1d162d72\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-cbtlt"
Nov 28 11:10:54 crc kubenswrapper[4923]: I1128 11:10:54.382747 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/aa97fc63-7e09-4217-9fb9-78fca4703f04-console-config\") pod \"console-f9d7485db-2vsdg\" (UID: \"aa97fc63-7e09-4217-9fb9-78fca4703f04\") " pod="openshift-console/console-f9d7485db-2vsdg"
Nov 28 11:10:54 crc kubenswrapper[4923]: I1128 11:10:54.382769 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cdedfd6e-9082-4411-b128-fc9806c67bd3-config\") pod \"route-controller-manager-6576b87f9c-p7flx\" (UID: \"cdedfd6e-9082-4411-b128-fc9806c67bd3\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-p7flx"
Nov 28 11:10:54 crc kubenswrapper[4923]: I1128 11:10:54.382789 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cdedfd6e-9082-4411-b128-fc9806c67bd3-serving-cert\") pod \"route-controller-manager-6576b87f9c-p7flx\" (UID: \"cdedfd6e-9082-4411-b128-fc9806c67bd3\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-p7flx"
Nov 28 11:10:54 crc kubenswrapper[4923]: I1128 11:10:54.382848 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/aa97fc63-7e09-4217-9fb9-78fca4703f04-oauth-serving-cert\") pod \"console-f9d7485db-2vsdg\" (UID: \"aa97fc63-7e09-4217-9fb9-78fca4703f04\") " pod="openshift-console/console-f9d7485db-2vsdg"
Nov 28 11:10:54 crc kubenswrapper[4923]: I1128 11:10:54.382881 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/889057c8-1eb2-4829-b1d5-a906b88eb68c-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-j6dnf\" (UID: \"889057c8-1eb2-4829-b1d5-a906b88eb68c\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-j6dnf"
Nov 28 11:10:54 crc kubenswrapper[4923]: I1128 11:10:54.382905 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7f08880-f8db-4170-8d1d-1bccb2df10f4-serving-cert\") pod \"openshift-config-operator-7777fb866f-mqbzf\" (UID: \"e7f08880-f8db-4170-8d1d-1bccb2df10f4\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-mqbzf"
Nov 28 11:10:54 crc kubenswrapper[4923]: I1128 11:10:54.383048 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/aa97fc63-7e09-4217-9fb9-78fca4703f04-console-oauth-config\") pod \"console-f9d7485db-2vsdg\" (UID: \"aa97fc63-7e09-4217-9fb9-78fca4703f04\") " pod="openshift-console/console-f9d7485db-2vsdg"
Nov 28 11:10:54 crc kubenswrapper[4923]: I1128 11:10:54.383086 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4926f4a2-0ee6-444b-a113-f6ee1d162d72-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-cbtlt\" (UID: \"4926f4a2-0ee6-444b-a113-f6ee1d162d72\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-cbtlt"
Nov 28 11:10:54 crc kubenswrapper[4923]: I1128 11:10:54.383133 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/aa97fc63-7e09-4217-9fb9-78fca4703f04-trusted-ca-bundle\") pod \"console-f9d7485db-2vsdg\" (UID: \"aa97fc63-7e09-4217-9fb9-78fca4703f04\") " pod="openshift-console/console-f9d7485db-2vsdg"
Nov 28 11:10:54 crc kubenswrapper[4923]: I1128 11:10:54.383184 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/abd99a1a-b8bf-44a5-82c4-a44a0c32a7ff-trusted-ca\") pod \"console-operator-58897d9998-tz5lm\" (UID: \"abd99a1a-b8bf-44a5-82c4-a44a0c32a7ff\") " pod="openshift-console-operator/console-operator-58897d9998-tz5lm"
Nov 28 11:10:54 crc kubenswrapper[4923]: I1128 11:10:54.383808 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key"
Nov 28 11:10:54 crc kubenswrapper[4923]: I1128 11:10:54.402433 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle"
Nov 28 11:10:54 crc kubenswrapper[4923]: I1128 11:10:54.423601 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt"
Nov 28 11:10:54 crc kubenswrapper[4923]: I1128 11:10:54.435077 4923 generic.go:334] "Generic (PLEG): container finished" podID="c708f3f9-1c78-43ee-8630-add159a49c49" containerID="92fa0c6b008f15b710f9cda682b0ce02cf926c11b3289d8b48b52073006cdc88" exitCode=0
Nov 28 11:10:54 crc kubenswrapper[4923]: I1128 11:10:54.435188 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pbbcs" event={"ID":"c708f3f9-1c78-43ee-8630-add159a49c49","Type":"ContainerDied","Data":"92fa0c6b008f15b710f9cda682b0ce02cf926c11b3289d8b48b52073006cdc88"}
Nov 28 11:10:54 crc kubenswrapper[4923]: I1128 11:10:54.435231 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pbbcs" event={"ID":"c708f3f9-1c78-43ee-8630-add159a49c49","Type":"ContainerStarted","Data":"e582a2f1beec3a7e34abff9d5040173bdf5abc56e82ef1c7a4e79281a96d3640"}
Nov 28 11:10:54 crc kubenswrapper[4923]: I1128 11:10:54.438566 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-7l2lz" event={"ID":"7a3aa8b5-2de7-476e-bc85-dbbe5281e8a8","Type":"ContainerStarted","Data":"22071f7c5139b03a49bf4d4f5b55c17f93bd7c8cf3da175d3662380bc8dc5cd8"}
Nov 28 11:10:54 crc kubenswrapper[4923]: I1128 11:10:54.438594 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-7l2lz" event={"ID":"7a3aa8b5-2de7-476e-bc85-dbbe5281e8a8","Type":"ContainerStarted","Data":"64a7de2be060b261c9f4078a3e86033f251de75cc6a347b629874adb430fe0f8"}
Nov 28 11:10:54 crc kubenswrapper[4923]: I1128 11:10:54.438851 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-7l2lz"
Nov 28 11:10:54 crc kubenswrapper[4923]: I1128 11:10:54.442807 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt"
Nov 28 11:10:54 crc kubenswrapper[4923]: I1128 11:10:54.447394 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-7l2lz"
Nov 28 11:10:54 crc kubenswrapper[4923]: I1128 11:10:54.450474 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-m4snv" event={"ID":"c3641d9d-5e9b-40a7-90ec-4fa7b3f42a4b","Type":"ContainerStarted","Data":"b9cc48e813bdf46b35e615a6586f65a5dd8de78ff7cc9bb1e1ba3014f5bf93bf"}
Nov 28 11:10:54 crc kubenswrapper[4923]: I1128 11:10:54.450506 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-m4snv" event={"ID":"c3641d9d-5e9b-40a7-90ec-4fa7b3f42a4b","Type":"ContainerStarted","Data":"59f5595cfbe6bcffb181ff4b04ee6024ea5f50b39979974664cd197a1cceb597"}
Nov 28 11:10:54 crc kubenswrapper[4923]: I1128 11:10:54.450520 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-m4snv" event={"ID":"c3641d9d-5e9b-40a7-90ec-4fa7b3f42a4b","Type":"ContainerStarted","Data":"0d0bb2e8edefecbfc77ad41a5928c6277d7163ec0e9d976f2a948f62703d2181"}
Nov 28 11:10:54 crc kubenswrapper[4923]: I1128 11:10:54.462545 4923 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k"
Nov 28 11:10:54 crc kubenswrapper[4923]: I1128 11:10:54.483751 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt"
Nov 28 11:10:54 crc kubenswrapper[4923]: I1128 11:10:54.484403 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/1b9cce3b-92fb-45f2-a81a-4a0a722ed13e-auth-proxy-config\") pod \"machine-approver-56656f9798-ckhbm\" (UID: \"1b9cce3b-92fb-45f2-a81a-4a0a722ed13e\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-ckhbm"
Nov 28 11:10:54 crc kubenswrapper[4923]: I1128 11:10:54.484630 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1b9cce3b-92fb-45f2-a81a-4a0a722ed13e-config\") pod \"machine-approver-56656f9798-ckhbm\" (UID: \"1b9cce3b-92fb-45f2-a81a-4a0a722ed13e\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-ckhbm"
Nov 28 11:10:54 crc kubenswrapper[4923]: I1128 11:10:54.485146 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/1b9cce3b-92fb-45f2-a81a-4a0a722ed13e-machine-approver-tls\") pod \"machine-approver-56656f9798-ckhbm\" (UID: \"1b9cce3b-92fb-45f2-a81a-4a0a722ed13e\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-ckhbm"
Nov 28 11:10:54 crc kubenswrapper[4923]: I1128 11:10:54.503246 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default"
Nov 28 11:10:54 crc kubenswrapper[4923]: I1128 11:10:54.522779 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls"
Nov 28 11:10:54 crc kubenswrapper[4923]: I1128 11:10:54.542361 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh"
Nov 28 11:10:54 crc kubenswrapper[4923]: I1128 11:10:54.562782 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd"
object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Nov 28 11:10:54 crc kubenswrapper[4923]: I1128 11:10:54.586031 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Nov 28 11:10:54 crc kubenswrapper[4923]: I1128 11:10:54.602644 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Nov 28 11:10:54 crc kubenswrapper[4923]: I1128 11:10:54.677622 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tg6gz\" (UniqueName: \"kubernetes.io/projected/af02ba5e-e22d-4363-a5e7-6a819c881eeb-kube-api-access-tg6gz\") pod \"openshift-apiserver-operator-796bbdcf4f-w6d2b\" (UID: \"af02ba5e-e22d-4363-a5e7-6a819c881eeb\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-w6d2b" Nov 28 11:10:54 crc kubenswrapper[4923]: I1128 11:10:54.739016 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-w6d2b" Nov 28 11:10:54 crc kubenswrapper[4923]: I1128 11:10:54.785075 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7vtcd\" (UniqueName: \"kubernetes.io/projected/ad44a891-fc97-4154-8f93-bbd276c5c18a-kube-api-access-7vtcd\") pod \"oauth-openshift-558db77b4-28cv6\" (UID: \"ad44a891-fc97-4154-8f93-bbd276c5c18a\") " pod="openshift-authentication/oauth-openshift-558db77b4-28cv6" Nov 28 11:10:54 crc kubenswrapper[4923]: I1128 11:10:54.785828 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Nov 28 11:10:54 crc kubenswrapper[4923]: I1128 11:10:54.800899 4923 request.go:700] Waited for 1.907986728s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-ingress-canary/secrets?fieldSelector=metadata.name%3Dcanary-serving-cert&limit=500&resourceVersion=0 Nov 28 11:10:54 crc kubenswrapper[4923]: I1128 11:10:54.801777 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Nov 28 11:10:54 crc kubenswrapper[4923]: I1128 11:10:54.824365 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Nov 28 11:10:54 crc kubenswrapper[4923]: I1128 11:10:54.849627 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Nov 28 11:10:54 crc kubenswrapper[4923]: I1128 11:10:54.883749 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/c948b544-79ae-4148-8483-ef898d1b6663-bound-sa-token\") pod \"ingress-operator-5b745b69d9-52xfd\" (UID: \"c948b544-79ae-4148-8483-ef898d1b6663\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-52xfd" Nov 28 11:10:54 crc kubenswrapper[4923]: I1128 11:10:54.920122 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jjpmp\" (UniqueName: \"kubernetes.io/projected/05bdba74-09ab-4d1c-9742-c842abf6c9f6-kube-api-access-jjpmp\") pod \"catalog-operator-68c6474976-hfvn9\" (UID: \"05bdba74-09ab-4d1c-9742-c842abf6c9f6\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hfvn9" Nov 28 11:10:54 crc kubenswrapper[4923]: I1128 11:10:54.932640 4923 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-29zt2\" (UniqueName: \"kubernetes.io/projected/54e50da8-3e06-4de6-88c0-cfe151b794ca-kube-api-access-29zt2\") pod \"packageserver-d55dfcdfc-6vdll\" (UID: \"54e50da8-3e06-4de6-88c0-cfe151b794ca\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6vdll" Nov 28 11:10:54 crc kubenswrapper[4923]: I1128 11:10:54.973274 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wnbnr\" (UniqueName: \"kubernetes.io/projected/45d0a5e3-828c-43e0-a609-ac7ae08d57af-kube-api-access-wnbnr\") pod \"dns-operator-744455d44c-w99dl\" (UID: \"45d0a5e3-828c-43e0-a609-ac7ae08d57af\") " pod="openshift-dns-operator/dns-operator-744455d44c-w99dl" Nov 28 11:10:54 crc kubenswrapper[4923]: I1128 11:10:54.981017 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-w99dl" Nov 28 11:10:54 crc kubenswrapper[4923]: I1128 11:10:54.983687 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f2hbs\" (UniqueName: \"kubernetes.io/projected/c948b544-79ae-4148-8483-ef898d1b6663-kube-api-access-f2hbs\") pod \"ingress-operator-5b745b69d9-52xfd\" (UID: \"c948b544-79ae-4148-8483-ef898d1b6663\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-52xfd" Nov 28 11:10:54 crc kubenswrapper[4923]: I1128 11:10:54.998538 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hfvn9" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.000637 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6vdll" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.001640 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-w6d2b"] Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.003799 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.006764 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/aa97fc63-7e09-4217-9fb9-78fca4703f04-console-serving-cert\") pod \"console-f9d7485db-2vsdg\" (UID: \"aa97fc63-7e09-4217-9fb9-78fca4703f04\") " pod="openshift-console/console-f9d7485db-2vsdg" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.026694 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-52xfd" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.030876 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.042462 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.050258 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-28cv6" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.051081 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/aa97fc63-7e09-4217-9fb9-78fca4703f04-oauth-serving-cert\") pod \"console-f9d7485db-2vsdg\" (UID: \"aa97fc63-7e09-4217-9fb9-78fca4703f04\") " pod="openshift-console/console-f9d7485db-2vsdg" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.064224 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.085184 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.097612 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4926f4a2-0ee6-444b-a113-f6ee1d162d72-serving-cert\") pod \"authentication-operator-69f744f599-cbtlt\" (UID: \"4926f4a2-0ee6-444b-a113-f6ee1d162d72\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-cbtlt" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.102764 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.132166 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.137545 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4926f4a2-0ee6-444b-a113-f6ee1d162d72-service-ca-bundle\") pod \"authentication-operator-69f744f599-cbtlt\" (UID: \"4926f4a2-0ee6-444b-a113-f6ee1d162d72\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-cbtlt" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.151634 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.152494 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/37ddbfb0-c042-460d-b772-9cdd214a79a1-audit\") pod \"apiserver-76f77b778f-lhbv8\" (UID: \"37ddbfb0-c042-460d-b772-9cdd214a79a1\") " pod="openshift-apiserver/apiserver-76f77b778f-lhbv8" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.177157 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.205217 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Nov 28 11:10:55 crc kubenswrapper[4923]: E1128 11:10:55.214150 4923 projected.go:194] Error preparing data for projected volume kube-api-access-8k6g4 for pod openshift-apiserver/apiserver-76f77b778f-lhbv8: failed to sync configmap cache: timed out waiting for the condition Nov 28 11:10:55 crc kubenswrapper[4923]: E1128 11:10:55.214235 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/37ddbfb0-c042-460d-b772-9cdd214a79a1-kube-api-access-8k6g4 podName:37ddbfb0-c042-460d-b772-9cdd214a79a1 nodeName:}" failed. 
Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.223205 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config"
Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.241353 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca"
Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.244186 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/aa97fc63-7e09-4217-9fb9-78fca4703f04-console-config\") pod \"console-f9d7485db-2vsdg\" (UID: \"aa97fc63-7e09-4217-9fb9-78fca4703f04\") " pod="openshift-console/console-f9d7485db-2vsdg"
Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.244375 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert"
Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.244715 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/abd99a1a-b8bf-44a5-82c4-a44a0c32a7ff-trusted-ca\") pod \"console-operator-58897d9998-tz5lm\" (UID: \"abd99a1a-b8bf-44a5-82c4-a44a0c32a7ff\") " pod="openshift-console-operator/console-operator-58897d9998-tz5lm"
Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.262527 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cdedfd6e-9082-4411-b128-fc9806c67bd3-serving-cert\") pod \"route-controller-manager-6576b87f9c-p7flx\" (UID: \"cdedfd6e-9082-4411-b128-fc9806c67bd3\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-p7flx"
Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.262871 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert"
Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.272442 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/37ddbfb0-c042-460d-b772-9cdd214a79a1-serving-cert\") pod \"apiserver-76f77b778f-lhbv8\" (UID: \"37ddbfb0-c042-460d-b772-9cdd214a79a1\") " pod="openshift-apiserver/apiserver-76f77b778f-lhbv8"
Nov 28 11:10:55 crc kubenswrapper[4923]: E1128 11:10:55.279748 4923 secret.go:188] Couldn't get secret openshift-apiserver/etcd-client: failed to sync secret cache: timed out waiting for the condition
Nov 28 11:10:55 crc kubenswrapper[4923]: E1128 11:10:55.279816 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/37ddbfb0-c042-460d-b772-9cdd214a79a1-etcd-client podName:37ddbfb0-c042-460d-b772-9cdd214a79a1 nodeName:}" failed. No retries permitted until 2025-11-28 11:10:56.279800234 +0000 UTC m=+135.408484444 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "etcd-client" (UniqueName: "kubernetes.io/secret/37ddbfb0-c042-460d-b772-9cdd214a79a1-etcd-client") pod "apiserver-76f77b778f-lhbv8" (UID: "37ddbfb0-c042-460d-b772-9cdd214a79a1") : failed to sync secret cache: timed out waiting for the condition
Nov 28 11:10:55 crc kubenswrapper[4923]: E1128 11:10:55.280133 4923 configmap.go:193] Couldn't get configMap openshift-apiserver/image-import-ca: failed to sync configmap cache: timed out waiting for the condition
Nov 28 11:10:55 crc kubenswrapper[4923]: E1128 11:10:55.280159 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/37ddbfb0-c042-460d-b772-9cdd214a79a1-image-import-ca podName:37ddbfb0-c042-460d-b772-9cdd214a79a1 nodeName:}" failed. No retries permitted until 2025-11-28 11:10:56.280151914 +0000 UTC m=+135.408836124 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "image-import-ca" (UniqueName: "kubernetes.io/configmap/37ddbfb0-c042-460d-b772-9cdd214a79a1-image-import-ca") pod "apiserver-76f77b778f-lhbv8" (UID: "37ddbfb0-c042-460d-b772-9cdd214a79a1") : failed to sync configmap cache: timed out waiting for the condition
Nov 28 11:10:55 crc kubenswrapper[4923]: E1128 11:10:55.282509 4923 configmap.go:193] Couldn't get configMap openshift-apiserver/etcd-serving-ca: failed to sync configmap cache: timed out waiting for the condition
Nov 28 11:10:55 crc kubenswrapper[4923]: E1128 11:10:55.282572 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/37ddbfb0-c042-460d-b772-9cdd214a79a1-etcd-serving-ca podName:37ddbfb0-c042-460d-b772-9cdd214a79a1 nodeName:}" failed. No retries permitted until 2025-11-28 11:10:56.282555762 +0000 UTC m=+135.411239972 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "etcd-serving-ca" (UniqueName: "kubernetes.io/configmap/37ddbfb0-c042-460d-b772-9cdd214a79a1-etcd-serving-ca") pod "apiserver-76f77b778f-lhbv8" (UID: "37ddbfb0-c042-460d-b772-9cdd214a79a1") : failed to sync configmap cache: timed out waiting for the condition
Nov 28 11:10:55 crc kubenswrapper[4923]: E1128 11:10:55.282598 4923 configmap.go:193] Couldn't get configMap openshift-apiserver/config: failed to sync configmap cache: timed out waiting for the condition
Nov 28 11:10:55 crc kubenswrapper[4923]: E1128 11:10:55.282618 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/37ddbfb0-c042-460d-b772-9cdd214a79a1-config podName:37ddbfb0-c042-460d-b772-9cdd214a79a1 nodeName:}" failed. No retries permitted until 2025-11-28 11:10:56.282611874 +0000 UTC m=+135.411296084 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/37ddbfb0-c042-460d-b772-9cdd214a79a1-config") pod "apiserver-76f77b778f-lhbv8" (UID: "37ddbfb0-c042-460d-b772-9cdd214a79a1") : failed to sync configmap cache: timed out waiting for the condition
Nov 28 11:10:55 crc kubenswrapper[4923]: E1128 11:10:55.282634 4923 secret.go:188] Couldn't get secret openshift-apiserver/encryption-config-1: failed to sync secret cache: timed out waiting for the condition
Nov 28 11:10:55 crc kubenswrapper[4923]: E1128 11:10:55.282652 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/37ddbfb0-c042-460d-b772-9cdd214a79a1-encryption-config podName:37ddbfb0-c042-460d-b772-9cdd214a79a1 nodeName:}" failed. No retries permitted until 2025-11-28 11:10:56.282647515 +0000 UTC m=+135.411331715 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "encryption-config" (UniqueName: "kubernetes.io/secret/37ddbfb0-c042-460d-b772-9cdd214a79a1-encryption-config") pod "apiserver-76f77b778f-lhbv8" (UID: "37ddbfb0-c042-460d-b772-9cdd214a79a1") : failed to sync secret cache: timed out waiting for the condition
Nov 28 11:10:55 crc kubenswrapper[4923]: E1128 11:10:55.282687 4923 configmap.go:193] Couldn't get configMap openshift-apiserver/trusted-ca-bundle: failed to sync configmap cache: timed out waiting for the condition
Nov 28 11:10:55 crc kubenswrapper[4923]: E1128 11:10:55.282707 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/37ddbfb0-c042-460d-b772-9cdd214a79a1-trusted-ca-bundle podName:37ddbfb0-c042-460d-b772-9cdd214a79a1 nodeName:}" failed. No retries permitted until 2025-11-28 11:10:56.282701906 +0000 UTC m=+135.411386106 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/37ddbfb0-c042-460d-b772-9cdd214a79a1-trusted-ca-bundle") pod "apiserver-76f77b778f-lhbv8" (UID: "37ddbfb0-c042-460d-b772-9cdd214a79a1") : failed to sync configmap cache: timed out waiting for the condition
Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.286765 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff"
Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.306346 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca"
Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.323245 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt"
Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.330895 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dldh9\" (UniqueName: \"kubernetes.io/projected/4926f4a2-0ee6-444b-a113-f6ee1d162d72-kube-api-access-dldh9\") pod \"authentication-operator-69f744f599-cbtlt\" (UID: \"4926f4a2-0ee6-444b-a113-f6ee1d162d72\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-cbtlt"
Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.343553 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj"
Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.376236 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2"
Nov 28 11:10:55 crc kubenswrapper[4923]: E1128 11:10:55.383796 4923 configmap.go:193] Couldn't get configMap openshift-authentication-operator/trusted-ca-bundle: failed to sync configmap cache: timed out waiting for the condition
Nov 28 11:10:55 crc kubenswrapper[4923]: E1128 11:10:55.383861 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/4926f4a2-0ee6-444b-a113-f6ee1d162d72-trusted-ca-bundle podName:4926f4a2-0ee6-444b-a113-f6ee1d162d72 nodeName:}" failed. No retries permitted until 2025-11-28 11:10:56.383843406 +0000 UTC m=+135.512527616 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/4926f4a2-0ee6-444b-a113-f6ee1d162d72-trusted-ca-bundle") pod "authentication-operator-69f744f599-cbtlt" (UID: "4926f4a2-0ee6-444b-a113-f6ee1d162d72") : failed to sync configmap cache: timed out waiting for the condition
Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/4926f4a2-0ee6-444b-a113-f6ee1d162d72-trusted-ca-bundle") pod "authentication-operator-69f744f599-cbtlt" (UID: "4926f4a2-0ee6-444b-a113-f6ee1d162d72") : failed to sync configmap cache: timed out waiting for the condition Nov 28 11:10:55 crc kubenswrapper[4923]: E1128 11:10:55.383987 4923 secret.go:188] Couldn't get secret openshift-cluster-samples-operator/samples-operator-tls: failed to sync secret cache: timed out waiting for the condition Nov 28 11:10:55 crc kubenswrapper[4923]: E1128 11:10:55.384049 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/889057c8-1eb2-4829-b1d5-a906b88eb68c-samples-operator-tls podName:889057c8-1eb2-4829-b1d5-a906b88eb68c nodeName:}" failed. No retries permitted until 2025-11-28 11:10:56.384032321 +0000 UTC m=+135.512716531 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "samples-operator-tls" (UniqueName: "kubernetes.io/secret/889057c8-1eb2-4829-b1d5-a906b88eb68c-samples-operator-tls") pod "cluster-samples-operator-665b6dd947-j6dnf" (UID: "889057c8-1eb2-4829-b1d5-a906b88eb68c") : failed to sync secret cache: timed out waiting for the condition Nov 28 11:10:55 crc kubenswrapper[4923]: E1128 11:10:55.384103 4923 configmap.go:193] Couldn't get configMap openshift-console/trusted-ca-bundle: failed to sync configmap cache: timed out waiting for the condition Nov 28 11:10:55 crc kubenswrapper[4923]: E1128 11:10:55.384110 4923 configmap.go:193] Couldn't get configMap openshift-console/service-ca: failed to sync configmap cache: timed out waiting for the condition Nov 28 11:10:55 crc kubenswrapper[4923]: E1128 11:10:55.384126 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/aa97fc63-7e09-4217-9fb9-78fca4703f04-trusted-ca-bundle podName:aa97fc63-7e09-4217-9fb9-78fca4703f04 nodeName:}" failed. No retries permitted until 2025-11-28 11:10:56.384120154 +0000 UTC m=+135.512804364 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/aa97fc63-7e09-4217-9fb9-78fca4703f04-trusted-ca-bundle") pod "console-f9d7485db-2vsdg" (UID: "aa97fc63-7e09-4217-9fb9-78fca4703f04") : failed to sync configmap cache: timed out waiting for the condition Nov 28 11:10:55 crc kubenswrapper[4923]: E1128 11:10:55.384141 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/aa97fc63-7e09-4217-9fb9-78fca4703f04-service-ca podName:aa97fc63-7e09-4217-9fb9-78fca4703f04 nodeName:}" failed. No retries permitted until 2025-11-28 11:10:56.384133464 +0000 UTC m=+135.512817674 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "service-ca" (UniqueName: "kubernetes.io/configmap/aa97fc63-7e09-4217-9fb9-78fca4703f04-service-ca") pod "console-f9d7485db-2vsdg" (UID: "aa97fc63-7e09-4217-9fb9-78fca4703f04") : failed to sync configmap cache: timed out waiting for the condition Nov 28 11:10:55 crc kubenswrapper[4923]: E1128 11:10:55.384144 4923 configmap.go:193] Couldn't get configMap openshift-authentication-operator/authentication-operator-config: failed to sync configmap cache: timed out waiting for the condition Nov 28 11:10:55 crc kubenswrapper[4923]: E1128 11:10:55.384154 4923 secret.go:188] Couldn't get secret openshift-config-operator/config-operator-serving-cert: failed to sync secret cache: timed out waiting for the condition Nov 28 11:10:55 crc kubenswrapper[4923]: E1128 11:10:55.384166 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/4926f4a2-0ee6-444b-a113-f6ee1d162d72-config podName:4926f4a2-0ee6-444b-a113-f6ee1d162d72 nodeName:}" failed. No retries permitted until 2025-11-28 11:10:56.384157365 +0000 UTC m=+135.512841575 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/4926f4a2-0ee6-444b-a113-f6ee1d162d72-config") pod "authentication-operator-69f744f599-cbtlt" (UID: "4926f4a2-0ee6-444b-a113-f6ee1d162d72") : failed to sync configmap cache: timed out waiting for the condition Nov 28 11:10:55 crc kubenswrapper[4923]: E1128 11:10:55.384173 4923 secret.go:188] Couldn't get secret openshift-console/console-oauth-config: failed to sync secret cache: timed out waiting for the condition Nov 28 11:10:55 crc kubenswrapper[4923]: E1128 11:10:55.384182 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e7f08880-f8db-4170-8d1d-1bccb2df10f4-serving-cert podName:e7f08880-f8db-4170-8d1d-1bccb2df10f4 nodeName:}" failed. No retries permitted until 2025-11-28 11:10:56.384173935 +0000 UTC m=+135.512858135 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/e7f08880-f8db-4170-8d1d-1bccb2df10f4-serving-cert") pod "openshift-config-operator-7777fb866f-mqbzf" (UID: "e7f08880-f8db-4170-8d1d-1bccb2df10f4") : failed to sync secret cache: timed out waiting for the condition Nov 28 11:10:55 crc kubenswrapper[4923]: E1128 11:10:55.384195 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/aa97fc63-7e09-4217-9fb9-78fca4703f04-console-oauth-config podName:aa97fc63-7e09-4217-9fb9-78fca4703f04 nodeName:}" failed. No retries permitted until 2025-11-28 11:10:56.384189296 +0000 UTC m=+135.512873506 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "console-oauth-config" (UniqueName: "kubernetes.io/secret/aa97fc63-7e09-4217-9fb9-78fca4703f04-console-oauth-config") pod "console-f9d7485db-2vsdg" (UID: "aa97fc63-7e09-4217-9fb9-78fca4703f04") : failed to sync secret cache: timed out waiting for the condition Nov 28 11:10:55 crc kubenswrapper[4923]: E1128 11:10:55.384210 4923 configmap.go:193] Couldn't get configMap openshift-route-controller-manager/client-ca: failed to sync configmap cache: timed out waiting for the condition Nov 28 11:10:55 crc kubenswrapper[4923]: E1128 11:10:55.384229 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/cdedfd6e-9082-4411-b128-fc9806c67bd3-client-ca podName:cdedfd6e-9082-4411-b128-fc9806c67bd3 nodeName:}" failed. 
No retries permitted until 2025-11-28 11:10:56.384222597 +0000 UTC m=+135.512906807 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "client-ca" (UniqueName: "kubernetes.io/configmap/cdedfd6e-9082-4411-b128-fc9806c67bd3-client-ca") pod "route-controller-manager-6576b87f9c-p7flx" (UID: "cdedfd6e-9082-4411-b128-fc9806c67bd3") : failed to sync configmap cache: timed out waiting for the condition Nov 28 11:10:55 crc kubenswrapper[4923]: E1128 11:10:55.384262 4923 configmap.go:193] Couldn't get configMap openshift-route-controller-manager/config: failed to sync configmap cache: timed out waiting for the condition Nov 28 11:10:55 crc kubenswrapper[4923]: E1128 11:10:55.384277 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/cdedfd6e-9082-4411-b128-fc9806c67bd3-config podName:cdedfd6e-9082-4411-b128-fc9806c67bd3 nodeName:}" failed. No retries permitted until 2025-11-28 11:10:56.384272828 +0000 UTC m=+135.512957038 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/cdedfd6e-9082-4411-b128-fc9806c67bd3-config") pod "route-controller-manager-6576b87f9c-p7flx" (UID: "cdedfd6e-9082-4411-b128-fc9806c67bd3") : failed to sync configmap cache: timed out waiting for the condition Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.385967 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.402721 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.439998 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.461172 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-w6d2b" event={"ID":"af02ba5e-e22d-4363-a5e7-6a819c881eeb","Type":"ContainerStarted","Data":"d2c5da30cbf6fa42d3e6d4779d41e7afd2cc7f1ece7b4eb9584fec007d45f42e"} Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.461205 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-w6d2b" event={"ID":"af02ba5e-e22d-4363-a5e7-6a819c881eeb","Type":"ContainerStarted","Data":"efc98bc2bbf638ee8f2b7b30176f730ea9a0f6b6e4c2e0c299a6b69941e31e15"} Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.462766 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.462961 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.463450 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-w99dl"] Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.473457 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pbbcs" event={"ID":"c708f3f9-1c78-43ee-8630-add159a49c49","Type":"ContainerStarted","Data":"f12bfff040ab05abfb28a0efe1a6b8bca06f46ca584e50cab078b8253d5bf014"} Nov 28 11:10:55 crc kubenswrapper[4923]: E1128 11:10:55.494398 4923 secret.go:188] Couldn't get secret 
Nov 28 11:10:55 crc kubenswrapper[4923]: E1128 11:10:55.494475 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/1b9cce3b-92fb-45f2-a81a-4a0a722ed13e-machine-approver-tls podName:1b9cce3b-92fb-45f2-a81a-4a0a722ed13e nodeName:}" failed. No retries permitted until 2025-11-28 11:10:56.494456644 +0000 UTC m=+135.623140854 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "machine-approver-tls" (UniqueName: "kubernetes.io/secret/1b9cce3b-92fb-45f2-a81a-4a0a722ed13e-machine-approver-tls") pod "machine-approver-56656f9798-ckhbm" (UID: "1b9cce3b-92fb-45f2-a81a-4a0a722ed13e") : failed to sync secret cache: timed out waiting for the condition
Nov 28 11:10:55 crc kubenswrapper[4923]: E1128 11:10:55.494760 4923 configmap.go:193] Couldn't get configMap openshift-cluster-machine-approver/kube-rbac-proxy: failed to sync configmap cache: timed out waiting for the condition
Nov 28 11:10:55 crc kubenswrapper[4923]: E1128 11:10:55.494794 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/1b9cce3b-92fb-45f2-a81a-4a0a722ed13e-auth-proxy-config podName:1b9cce3b-92fb-45f2-a81a-4a0a722ed13e nodeName:}" failed. No retries permitted until 2025-11-28 11:10:56.494783313 +0000 UTC m=+135.623467523 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "auth-proxy-config" (UniqueName: "kubernetes.io/configmap/1b9cce3b-92fb-45f2-a81a-4a0a722ed13e-auth-proxy-config") pod "machine-approver-56656f9798-ckhbm" (UID: "1b9cce3b-92fb-45f2-a81a-4a0a722ed13e") : failed to sync configmap cache: timed out waiting for the condition
Nov 28 11:10:55 crc kubenswrapper[4923]: E1128 11:10:55.500328 4923 configmap.go:193] Couldn't get configMap openshift-cluster-machine-approver/machine-approver-config: failed to sync configmap cache: timed out waiting for the condition
Nov 28 11:10:55 crc kubenswrapper[4923]: E1128 11:10:55.500369 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/1b9cce3b-92fb-45f2-a81a-4a0a722ed13e-config podName:1b9cce3b-92fb-45f2-a81a-4a0a722ed13e nodeName:}" failed. No retries permitted until 2025-11-28 11:10:56.500360041 +0000 UTC m=+135.629044251 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/1b9cce3b-92fb-45f2-a81a-4a0a722ed13e-config") pod "machine-approver-56656f9798-ckhbm" (UID: "1b9cce3b-92fb-45f2-a81a-4a0a722ed13e") : failed to sync configmap cache: timed out waiting for the condition
Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.512214 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert"
Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.512433 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets"
Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.531287 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6vdll"]
Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.531535 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx"
Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.544299 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt"
Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.545505 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-52xfd"]
Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.557547 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rhqs5\" (UniqueName: \"kubernetes.io/projected/abd99a1a-b8bf-44a5-82c4-a44a0c32a7ff-kube-api-access-rhqs5\") pod \"console-operator-58897d9998-tz5lm\" (UID: \"abd99a1a-b8bf-44a5-82c4-a44a0c32a7ff\") " pod="openshift-console-operator/console-operator-58897d9998-tz5lm"
Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.573996 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-28cv6"]
Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.580645 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hfvn9"]
Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.584238 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config"
Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.609836 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca"
Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.619737 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/a49569a7-dda0-4856-816e-296642ddbdff-default-certificate\") pod \"router-default-5444994796-dfffg\" (UID: \"a49569a7-dda0-4856-816e-296642ddbdff\") " pod="openshift-ingress/router-default-5444994796-dfffg"
Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.619780 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/6f176857-50d2-41c7-8237-961e330c629d-registry-certificates\") pod \"image-registry-697d97f7c8-g855d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " pod="openshift-image-registry/image-registry-697d97f7c8-g855d"
Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.619890 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/260bf9ee-f05f-4598-9bf9-de7ed2d1723f-profile-collector-cert\") pod \"olm-operator-6b444d44fb-kwrnm\" (UID: \"260bf9ee-f05f-4598-9bf9-de7ed2d1723f\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-kwrnm"
Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.619997 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/03781a39-3788-4e8a-9a0d-d97c3fb9e4b3-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-l7sqm\" (UID: \"03781a39-3788-4e8a-9a0d-d97c3fb9e4b3\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-l7sqm"
Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.620024 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f8ec4511-563c-43b0-ad0d-b7916aeee89a-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-nmc8h\" (UID: \"f8ec4511-563c-43b0-ad0d-b7916aeee89a\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-nmc8h"
Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.620048 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/14ef552e-14c1-49e6-b06d-0736e2a3ed73-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-dxdgg\" (UID: \"14ef552e-14c1-49e6-b06d-0736e2a3ed73\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-dxdgg"
Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.620065 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kw89r\" (UniqueName: \"kubernetes.io/projected/7638a2ec-d85c-456d-9d1b-9e56d83eae4b-kube-api-access-kw89r\") pod \"downloads-7954f5f757-fd2jt\" (UID: \"7638a2ec-d85c-456d-9d1b-9e56d83eae4b\") " pod="openshift-console/downloads-7954f5f757-fd2jt"
Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.620087 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/03781a39-3788-4e8a-9a0d-d97c3fb9e4b3-config\") pod \"kube-apiserver-operator-766d6c64bb-l7sqm\" (UID: \"03781a39-3788-4e8a-9a0d-d97c3fb9e4b3\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-l7sqm"
Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.620128 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-shllm\" (UniqueName: \"kubernetes.io/projected/31885971-a674-4f47-999b-d7e5435f34d0-kube-api-access-shllm\") pod \"machine-config-controller-84d6567774-hcztj\" (UID: \"31885971-a674-4f47-999b-d7e5435f34d0\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-hcztj"
Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.620175 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31885971-a674-4f47-999b-d7e5435f34d0-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-hcztj\" (UID: \"31885971-a674-4f47-999b-d7e5435f34d0\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-hcztj"
pod="openshift-machine-config-operator/machine-config-controller-84d6567774-hcztj" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.620234 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5wvsb\" (UniqueName: \"kubernetes.io/projected/a49569a7-dda0-4856-816e-296642ddbdff-kube-api-access-5wvsb\") pod \"router-default-5444994796-dfffg\" (UID: \"a49569a7-dda0-4856-816e-296642ddbdff\") " pod="openshift-ingress/router-default-5444994796-dfffg" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.620261 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/bc404fb9-c265-4265-84e8-e3dd111fae9a-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-lswhk\" (UID: \"bc404fb9-c265-4265-84e8-e3dd111fae9a\") " pod="openshift-marketplace/marketplace-operator-79b997595-lswhk" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.620285 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/6f176857-50d2-41c7-8237-961e330c629d-bound-sa-token\") pod \"image-registry-697d97f7c8-g855d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " pod="openshift-image-registry/image-registry-697d97f7c8-g855d" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.620302 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tmgnv\" (UniqueName: \"kubernetes.io/projected/6f176857-50d2-41c7-8237-961e330c629d-kube-api-access-tmgnv\") pod \"image-registry-697d97f7c8-g855d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " pod="openshift-image-registry/image-registry-697d97f7c8-g855d" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.620819 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f8ec4511-563c-43b0-ad0d-b7916aeee89a-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-nmc8h\" (UID: \"f8ec4511-563c-43b0-ad0d-b7916aeee89a\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-nmc8h" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.620964 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xlpgs\" (UniqueName: \"kubernetes.io/projected/0755523c-a607-4b3e-966e-cb31294dde65-kube-api-access-xlpgs\") pod \"cluster-image-registry-operator-dc59b4c8b-6t9g2\" (UID: \"0755523c-a607-4b3e-966e-cb31294dde65\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-6t9g2" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.620995 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hjxtn\" (UniqueName: \"kubernetes.io/projected/260bf9ee-f05f-4598-9bf9-de7ed2d1723f-kube-api-access-hjxtn\") pod \"olm-operator-6b444d44fb-kwrnm\" (UID: \"260bf9ee-f05f-4598-9bf9-de7ed2d1723f\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-kwrnm" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.621022 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/a49569a7-dda0-4856-816e-296642ddbdff-stats-auth\") pod 
\"router-default-5444994796-dfffg\" (UID: \"a49569a7-dda0-4856-816e-296642ddbdff\") " pod="openshift-ingress/router-default-5444994796-dfffg" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.621073 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/6f176857-50d2-41c7-8237-961e330c629d-installation-pull-secrets\") pod \"image-registry-697d97f7c8-g855d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " pod="openshift-image-registry/image-registry-697d97f7c8-g855d" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.621757 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6f176857-50d2-41c7-8237-961e330c629d-trusted-ca\") pod \"image-registry-697d97f7c8-g855d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " pod="openshift-image-registry/image-registry-697d97f7c8-g855d" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.622229 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/3a41216a-9d26-4691-aa6b-8a50c0a94016-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-f4dfh\" (UID: \"3a41216a-9d26-4691-aa6b-8a50c0a94016\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-f4dfh" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.622960 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bc404fb9-c265-4265-84e8-e3dd111fae9a-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-lswhk\" (UID: \"bc404fb9-c265-4265-84e8-e3dd111fae9a\") " pod="openshift-marketplace/marketplace-operator-79b997595-lswhk" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.623048 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kwlcz\" (UniqueName: \"kubernetes.io/projected/ed45dc51-25a9-47f1-b80b-de4288627e50-kube-api-access-kwlcz\") pod \"openshift-controller-manager-operator-756b6f6bc6-jhjvz\" (UID: \"ed45dc51-25a9-47f1-b80b-de4288627e50\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-jhjvz" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.623073 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31885971-a674-4f47-999b-d7e5435f34d0-proxy-tls\") pod \"machine-config-controller-84d6567774-hcztj\" (UID: \"31885971-a674-4f47-999b-d7e5435f34d0\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-hcztj" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.623132 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/03781a39-3788-4e8a-9a0d-d97c3fb9e4b3-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-l7sqm\" (UID: \"03781a39-3788-4e8a-9a0d-d97c3fb9e4b3\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-l7sqm" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.623154 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" 
(UniqueName: \"kubernetes.io/secret/ed45dc51-25a9-47f1-b80b-de4288627e50-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-jhjvz\" (UID: \"ed45dc51-25a9-47f1-b80b-de4288627e50\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-jhjvz" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.623196 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/0755523c-a607-4b3e-966e-cb31294dde65-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-6t9g2\" (UID: \"0755523c-a607-4b3e-966e-cb31294dde65\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-6t9g2" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.623279 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/0755523c-a607-4b3e-966e-cb31294dde65-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-6t9g2\" (UID: \"0755523c-a607-4b3e-966e-cb31294dde65\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-6t9g2" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.623303 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a49569a7-dda0-4856-816e-296642ddbdff-metrics-certs\") pod \"router-default-5444994796-dfffg\" (UID: \"a49569a7-dda0-4856-816e-296642ddbdff\") " pod="openshift-ingress/router-default-5444994796-dfffg" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.623346 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ed45dc51-25a9-47f1-b80b-de4288627e50-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-jhjvz\" (UID: \"ed45dc51-25a9-47f1-b80b-de4288627e50\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-jhjvz" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.623365 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/14ef552e-14c1-49e6-b06d-0736e2a3ed73-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-dxdgg\" (UID: \"14ef552e-14c1-49e6-b06d-0736e2a3ed73\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-dxdgg" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.623395 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/14ef552e-14c1-49e6-b06d-0736e2a3ed73-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-dxdgg\" (UID: \"14ef552e-14c1-49e6-b06d-0736e2a3ed73\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-dxdgg" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.623447 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-g855d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " pod="openshift-image-registry/image-registry-697d97f7c8-g855d" Nov 28 11:10:55 crc 
Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.623476 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a49569a7-dda0-4856-816e-296642ddbdff-service-ca-bundle\") pod \"router-default-5444994796-dfffg\" (UID: \"a49569a7-dda0-4856-816e-296642ddbdff\") " pod="openshift-ingress/router-default-5444994796-dfffg"
Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.623511 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tzcw8\" (UniqueName: \"kubernetes.io/projected/933d252a-8d35-415c-9e4e-754dd933be46-kube-api-access-tzcw8\") pod \"migrator-59844c95c7-8cknx\" (UID: \"933d252a-8d35-415c-9e4e-754dd933be46\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-8cknx"
Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.623547 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/0755523c-a607-4b3e-966e-cb31294dde65-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-6t9g2\" (UID: \"0755523c-a607-4b3e-966e-cb31294dde65\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-6t9g2"
Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.623621 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9lmqc\" (UniqueName: \"kubernetes.io/projected/f8ec4511-563c-43b0-ad0d-b7916aeee89a-kube-api-access-9lmqc\") pod \"kube-storage-version-migrator-operator-b67b599dd-nmc8h\" (UID: \"f8ec4511-563c-43b0-ad0d-b7916aeee89a\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-nmc8h"
Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.623664 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pzgn4\" (UniqueName: \"kubernetes.io/projected/3a41216a-9d26-4691-aa6b-8a50c0a94016-kube-api-access-pzgn4\") pod \"control-plane-machine-set-operator-78cbb6b69f-f4dfh\" (UID: \"3a41216a-9d26-4691-aa6b-8a50c0a94016\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-f4dfh"
Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.623769 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/6f176857-50d2-41c7-8237-961e330c629d-registry-tls\") pod \"image-registry-697d97f7c8-g855d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " pod="openshift-image-registry/image-registry-697d97f7c8-g855d"
Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.623793 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ll6pl\" (UniqueName: \"kubernetes.io/projected/bc404fb9-c265-4265-84e8-e3dd111fae9a-kube-api-access-ll6pl\") pod \"marketplace-operator-79b997595-lswhk\" (UID: \"bc404fb9-c265-4265-84e8-e3dd111fae9a\") " pod="openshift-marketplace/marketplace-operator-79b997595-lswhk"
Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.623880 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/260bf9ee-f05f-4598-9bf9-de7ed2d1723f-srv-cert\") pod \"olm-operator-6b444d44fb-kwrnm\" (UID: \"260bf9ee-f05f-4598-9bf9-de7ed2d1723f\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-kwrnm"
Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.623898 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/6f176857-50d2-41c7-8237-961e330c629d-ca-trust-extracted\") pod \"image-registry-697d97f7c8-g855d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " pod="openshift-image-registry/image-registry-697d97f7c8-g855d"
Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.625347 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca"
Nov 28 11:10:55 crc kubenswrapper[4923]: E1128 11:10:55.625380 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 11:10:56.125363715 +0000 UTC m=+135.254047925 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-g855d" (UID: "6f176857-50d2-41c7-8237-961e330c629d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.644693 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z"
Nov 28 11:10:55 crc kubenswrapper[4923]: E1128 11:10:55.659765 4923 projected.go:288] Couldn't get configMap openshift-console/kube-root-ca.crt: failed to sync configmap cache: timed out waiting for the condition
Nov 28 11:10:55 crc kubenswrapper[4923]: E1128 11:10:55.659802 4923 projected.go:194] Error preparing data for projected volume kube-api-access-wwmwf for pod openshift-console/console-f9d7485db-2vsdg: failed to sync configmap cache: timed out waiting for the condition
Nov 28 11:10:55 crc kubenswrapper[4923]: E1128 11:10:55.659865 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/aa97fc63-7e09-4217-9fb9-78fca4703f04-kube-api-access-wwmwf podName:aa97fc63-7e09-4217-9fb9-78fca4703f04 nodeName:}" failed. No retries permitted until 2025-11-28 11:10:56.15984836 +0000 UTC m=+135.288532570 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-wwmwf" (UniqueName: "kubernetes.io/projected/aa97fc63-7e09-4217-9fb9-78fca4703f04-kube-api-access-wwmwf") pod "console-f9d7485db-2vsdg" (UID: "aa97fc63-7e09-4217-9fb9-78fca4703f04") : failed to sync configmap cache: timed out waiting for the condition
Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.669519 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt"
Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.684953 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt"
Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.713628 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt"
Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.726381 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.726514 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/a49569a7-dda0-4856-816e-296642ddbdff-stats-auth\") pod \"router-default-5444994796-dfffg\" (UID: \"a49569a7-dda0-4856-816e-296642ddbdff\") " pod="openshift-ingress/router-default-5444994796-dfffg"
Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.726560 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/6f176857-50d2-41c7-8237-961e330c629d-installation-pull-secrets\") pod \"image-registry-697d97f7c8-g855d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " pod="openshift-image-registry/image-registry-697d97f7c8-g855d"
Nov 28 11:10:55 crc kubenswrapper[4923]: E1128 11:10:55.726613 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 11:10:56.226590907 +0000 UTC m=+135.355275117 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.726660 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/03fb8831-5c58-4bcf-8e9c-d18d0074be3e-serving-cert\") pod \"etcd-operator-b45778765-s5wfn\" (UID: \"03fb8831-5c58-4bcf-8e9c-d18d0074be3e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-s5wfn" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.726712 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6f176857-50d2-41c7-8237-961e330c629d-trusted-ca\") pod \"image-registry-697d97f7c8-g855d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " pod="openshift-image-registry/image-registry-697d97f7c8-g855d" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.726736 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/c8ae2931-245c-49b5-9844-432bc6ecf3cc-certs\") pod \"machine-config-server-czm5s\" (UID: \"c8ae2931-245c-49b5-9844-432bc6ecf3cc\") " pod="openshift-machine-config-operator/machine-config-server-czm5s" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.726756 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/3a41216a-9d26-4691-aa6b-8a50c0a94016-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-f4dfh\" (UID: \"3a41216a-9d26-4691-aa6b-8a50c0a94016\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-f4dfh" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.726781 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bc404fb9-c265-4265-84e8-e3dd111fae9a-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-lswhk\" (UID: \"bc404fb9-c265-4265-84e8-e3dd111fae9a\") " pod="openshift-marketplace/marketplace-operator-79b997595-lswhk" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.726801 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/d25bad00-52c9-449a-a73b-b53b4c4f2577-proxy-tls\") pod \"machine-config-operator-74547568cd-7j5p2\" (UID: \"d25bad00-52c9-449a-a73b-b53b4c4f2577\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-7j5p2" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.726834 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/ce29e4b8-83fb-402d-a969-efa9106fdf29-mountpoint-dir\") pod \"csi-hostpathplugin-9x5dg\" (UID: \"ce29e4b8-83fb-402d-a969-efa9106fdf29\") " pod="hostpath-provisioner/csi-hostpathplugin-9x5dg" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.726853 4923 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ac7e8c4a-3957-4b3f-a6ce-968bb42f2a89-config-volume\") pod \"collect-profiles-29405460-778cj\" (UID: \"ac7e8c4a-3957-4b3f-a6ce-968bb42f2a89\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405460-778cj" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.726867 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/d25bad00-52c9-449a-a73b-b53b4c4f2577-images\") pod \"machine-config-operator-74547568cd-7j5p2\" (UID: \"d25bad00-52c9-449a-a73b-b53b4c4f2577\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-7j5p2" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.726886 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kwlcz\" (UniqueName: \"kubernetes.io/projected/ed45dc51-25a9-47f1-b80b-de4288627e50-kube-api-access-kwlcz\") pod \"openshift-controller-manager-operator-756b6f6bc6-jhjvz\" (UID: \"ed45dc51-25a9-47f1-b80b-de4288627e50\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-jhjvz" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.726905 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31885971-a674-4f47-999b-d7e5435f34d0-proxy-tls\") pod \"machine-config-controller-84d6567774-hcztj\" (UID: \"31885971-a674-4f47-999b-d7e5435f34d0\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-hcztj" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.726957 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/03fb8831-5c58-4bcf-8e9c-d18d0074be3e-etcd-client\") pod \"etcd-operator-b45778765-s5wfn\" (UID: \"03fb8831-5c58-4bcf-8e9c-d18d0074be3e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-s5wfn" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.726975 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/03781a39-3788-4e8a-9a0d-d97c3fb9e4b3-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-l7sqm\" (UID: \"03781a39-3788-4e8a-9a0d-d97c3fb9e4b3\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-l7sqm" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.727000 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ed45dc51-25a9-47f1-b80b-de4288627e50-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-jhjvz\" (UID: \"ed45dc51-25a9-47f1-b80b-de4288627e50\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-jhjvz" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.727025 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/0755523c-a607-4b3e-966e-cb31294dde65-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-6t9g2\" (UID: \"0755523c-a607-4b3e-966e-cb31294dde65\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-6t9g2" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.727042 4923 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m7vrt\" (UniqueName: \"kubernetes.io/projected/641c9aa3-7da2-4122-9410-cd46d1733143-kube-api-access-m7vrt\") pod \"ingress-canary-8b2zb\" (UID: \"641c9aa3-7da2-4122-9410-cd46d1733143\") " pod="openshift-ingress-canary/ingress-canary-8b2zb" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.727058 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/74a3fc68-fb6b-4070-85ea-a4e70aa6406b-signing-cabundle\") pod \"service-ca-9c57cc56f-k9zcz\" (UID: \"74a3fc68-fb6b-4070-85ea-a4e70aa6406b\") " pod="openshift-service-ca/service-ca-9c57cc56f-k9zcz" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.727087 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/4b125207-13c8-4142-86d1-99645442eddf-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-m6kq9\" (UID: \"4b125207-13c8-4142-86d1-99645442eddf\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-m6kq9" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.727105 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a49569a7-dda0-4856-816e-296642ddbdff-metrics-certs\") pod \"router-default-5444994796-dfffg\" (UID: \"a49569a7-dda0-4856-816e-296642ddbdff\") " pod="openshift-ingress/router-default-5444994796-dfffg" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.727138 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/0755523c-a607-4b3e-966e-cb31294dde65-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-6t9g2\" (UID: \"0755523c-a607-4b3e-966e-cb31294dde65\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-6t9g2" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.727153 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ed45dc51-25a9-47f1-b80b-de4288627e50-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-jhjvz\" (UID: \"ed45dc51-25a9-47f1-b80b-de4288627e50\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-jhjvz" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.727171 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/14ef552e-14c1-49e6-b06d-0736e2a3ed73-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-dxdgg\" (UID: \"14ef552e-14c1-49e6-b06d-0736e2a3ed73\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-dxdgg" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.727188 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/641c9aa3-7da2-4122-9410-cd46d1733143-cert\") pod \"ingress-canary-8b2zb\" (UID: \"641c9aa3-7da2-4122-9410-cd46d1733143\") " pod="openshift-ingress-canary/ingress-canary-8b2zb" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.727212 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/14ef552e-14c1-49e6-b06d-0736e2a3ed73-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-dxdgg\" (UID: \"14ef552e-14c1-49e6-b06d-0736e2a3ed73\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-dxdgg" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.727257 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-g855d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " pod="openshift-image-registry/image-registry-697d97f7c8-g855d" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.727280 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a49569a7-dda0-4856-816e-296642ddbdff-service-ca-bundle\") pod \"router-default-5444994796-dfffg\" (UID: \"a49569a7-dda0-4856-816e-296642ddbdff\") " pod="openshift-ingress/router-default-5444994796-dfffg" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.727296 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tzcw8\" (UniqueName: \"kubernetes.io/projected/933d252a-8d35-415c-9e4e-754dd933be46-kube-api-access-tzcw8\") pod \"migrator-59844c95c7-8cknx\" (UID: \"933d252a-8d35-415c-9e4e-754dd933be46\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-8cknx" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.727336 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/0755523c-a607-4b3e-966e-cb31294dde65-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-6t9g2\" (UID: \"0755523c-a607-4b3e-966e-cb31294dde65\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-6t9g2" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.727354 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/00c5c14d-d989-41a8-8447-c1a9c47426a2-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-zfzrm\" (UID: \"00c5c14d-d989-41a8-8447-c1a9c47426a2\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zfzrm" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.727372 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9lmqc\" (UniqueName: \"kubernetes.io/projected/f8ec4511-563c-43b0-ad0d-b7916aeee89a-kube-api-access-9lmqc\") pod \"kube-storage-version-migrator-operator-b67b599dd-nmc8h\" (UID: \"f8ec4511-563c-43b0-ad0d-b7916aeee89a\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-nmc8h" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.727387 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/03fb8831-5c58-4bcf-8e9c-d18d0074be3e-etcd-ca\") pod \"etcd-operator-b45778765-s5wfn\" (UID: \"03fb8831-5c58-4bcf-8e9c-d18d0074be3e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-s5wfn" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.727411 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8k6g4\" 
(UniqueName: \"kubernetes.io/projected/37ddbfb0-c042-460d-b772-9cdd214a79a1-kube-api-access-8k6g4\") pod \"apiserver-76f77b778f-lhbv8\" (UID: \"37ddbfb0-c042-460d-b772-9cdd214a79a1\") " pod="openshift-apiserver/apiserver-76f77b778f-lhbv8" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.727426 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/ce29e4b8-83fb-402d-a969-efa9106fdf29-registration-dir\") pod \"csi-hostpathplugin-9x5dg\" (UID: \"ce29e4b8-83fb-402d-a969-efa9106fdf29\") " pod="hostpath-provisioner/csi-hostpathplugin-9x5dg" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.727445 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pzgn4\" (UniqueName: \"kubernetes.io/projected/3a41216a-9d26-4691-aa6b-8a50c0a94016-kube-api-access-pzgn4\") pod \"control-plane-machine-set-operator-78cbb6b69f-f4dfh\" (UID: \"3a41216a-9d26-4691-aa6b-8a50c0a94016\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-f4dfh" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.727461 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/6f176857-50d2-41c7-8237-961e330c629d-registry-tls\") pod \"image-registry-697d97f7c8-g855d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " pod="openshift-image-registry/image-registry-697d97f7c8-g855d" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.727476 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ll6pl\" (UniqueName: \"kubernetes.io/projected/bc404fb9-c265-4265-84e8-e3dd111fae9a-kube-api-access-ll6pl\") pod \"marketplace-operator-79b997595-lswhk\" (UID: \"bc404fb9-c265-4265-84e8-e3dd111fae9a\") " pod="openshift-marketplace/marketplace-operator-79b997595-lswhk" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.727502 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/03fb8831-5c58-4bcf-8e9c-d18d0074be3e-etcd-service-ca\") pod \"etcd-operator-b45778765-s5wfn\" (UID: \"03fb8831-5c58-4bcf-8e9c-d18d0074be3e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-s5wfn" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.727517 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/260bf9ee-f05f-4598-9bf9-de7ed2d1723f-srv-cert\") pod \"olm-operator-6b444d44fb-kwrnm\" (UID: \"260bf9ee-f05f-4598-9bf9-de7ed2d1723f\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-kwrnm" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.727543 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/6f176857-50d2-41c7-8237-961e330c629d-ca-trust-extracted\") pod \"image-registry-697d97f7c8-g855d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " pod="openshift-image-registry/image-registry-697d97f7c8-g855d" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.727575 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/a49569a7-dda0-4856-816e-296642ddbdff-default-certificate\") pod \"router-default-5444994796-dfffg\" (UID: \"a49569a7-dda0-4856-816e-296642ddbdff\") " 
pod="openshift-ingress/router-default-5444994796-dfffg" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.727608 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/6f176857-50d2-41c7-8237-961e330c629d-registry-certificates\") pod \"image-registry-697d97f7c8-g855d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " pod="openshift-image-registry/image-registry-697d97f7c8-g855d" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.727625 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lmzh7\" (UniqueName: \"kubernetes.io/projected/d25bad00-52c9-449a-a73b-b53b4c4f2577-kube-api-access-lmzh7\") pod \"machine-config-operator-74547568cd-7j5p2\" (UID: \"d25bad00-52c9-449a-a73b-b53b4c4f2577\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-7j5p2" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.727641 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/ce29e4b8-83fb-402d-a969-efa9106fdf29-socket-dir\") pod \"csi-hostpathplugin-9x5dg\" (UID: \"ce29e4b8-83fb-402d-a969-efa9106fdf29\") " pod="hostpath-provisioner/csi-hostpathplugin-9x5dg" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.727671 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p8dmq\" (UniqueName: \"kubernetes.io/projected/03fb8831-5c58-4bcf-8e9c-d18d0074be3e-kube-api-access-p8dmq\") pod \"etcd-operator-b45778765-s5wfn\" (UID: \"03fb8831-5c58-4bcf-8e9c-d18d0074be3e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-s5wfn" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.727689 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/260bf9ee-f05f-4598-9bf9-de7ed2d1723f-profile-collector-cert\") pod \"olm-operator-6b444d44fb-kwrnm\" (UID: \"260bf9ee-f05f-4598-9bf9-de7ed2d1723f\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-kwrnm" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.727705 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m6rgf\" (UniqueName: \"kubernetes.io/projected/3de372b7-d84f-46f7-b773-255e32d0e882-kube-api-access-m6rgf\") pod \"package-server-manager-789f6589d5-cnw7p\" (UID: \"3de372b7-d84f-46f7-b773-255e32d0e882\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-cnw7p" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.727749 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/beb2a474-a9eb-4a5a-915b-003a2654a0d0-config-volume\") pod \"dns-default-m4cv2\" (UID: \"beb2a474-a9eb-4a5a-915b-003a2654a0d0\") " pod="openshift-dns/dns-default-m4cv2" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.727781 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p8ptf\" (UniqueName: \"kubernetes.io/projected/beb2a474-a9eb-4a5a-915b-003a2654a0d0-kube-api-access-p8ptf\") pod \"dns-default-m4cv2\" (UID: \"beb2a474-a9eb-4a5a-915b-003a2654a0d0\") " pod="openshift-dns/dns-default-m4cv2" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.727834 4923 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/c8ae2931-245c-49b5-9844-432bc6ecf3cc-node-bootstrap-token\") pod \"machine-config-server-czm5s\" (UID: \"c8ae2931-245c-49b5-9844-432bc6ecf3cc\") " pod="openshift-machine-config-operator/machine-config-server-czm5s" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.727852 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h675h\" (UniqueName: \"kubernetes.io/projected/c8ae2931-245c-49b5-9844-432bc6ecf3cc-kube-api-access-h675h\") pod \"machine-config-server-czm5s\" (UID: \"c8ae2931-245c-49b5-9844-432bc6ecf3cc\") " pod="openshift-machine-config-operator/machine-config-server-czm5s" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.727898 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/03781a39-3788-4e8a-9a0d-d97c3fb9e4b3-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-l7sqm\" (UID: \"03781a39-3788-4e8a-9a0d-d97c3fb9e4b3\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-l7sqm" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.727916 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-42vrg\" (UniqueName: \"kubernetes.io/projected/4b125207-13c8-4142-86d1-99645442eddf-kube-api-access-42vrg\") pod \"multus-admission-controller-857f4d67dd-m6kq9\" (UID: \"4b125207-13c8-4142-86d1-99645442eddf\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-m6kq9" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.727946 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/ce29e4b8-83fb-402d-a969-efa9106fdf29-plugins-dir\") pod \"csi-hostpathplugin-9x5dg\" (UID: \"ce29e4b8-83fb-402d-a969-efa9106fdf29\") " pod="hostpath-provisioner/csi-hostpathplugin-9x5dg" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.727965 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f8ec4511-563c-43b0-ad0d-b7916aeee89a-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-nmc8h\" (UID: \"f8ec4511-563c-43b0-ad0d-b7916aeee89a\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-nmc8h" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.727981 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/14ef552e-14c1-49e6-b06d-0736e2a3ed73-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-dxdgg\" (UID: \"14ef552e-14c1-49e6-b06d-0736e2a3ed73\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-dxdgg" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.727998 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kw89r\" (UniqueName: \"kubernetes.io/projected/7638a2ec-d85c-456d-9d1b-9e56d83eae4b-kube-api-access-kw89r\") pod \"downloads-7954f5f757-fd2jt\" (UID: \"7638a2ec-d85c-456d-9d1b-9e56d83eae4b\") " pod="openshift-console/downloads-7954f5f757-fd2jt" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.728015 4923 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mj9gd\" (UniqueName: \"kubernetes.io/projected/ac7e8c4a-3957-4b3f-a6ce-968bb42f2a89-kube-api-access-mj9gd\") pod \"collect-profiles-29405460-778cj\" (UID: \"ac7e8c4a-3957-4b3f-a6ce-968bb42f2a89\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405460-778cj" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.728029 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/ce29e4b8-83fb-402d-a969-efa9106fdf29-csi-data-dir\") pod \"csi-hostpathplugin-9x5dg\" (UID: \"ce29e4b8-83fb-402d-a969-efa9106fdf29\") " pod="hostpath-provisioner/csi-hostpathplugin-9x5dg" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.728044 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3de372b7-d84f-46f7-b773-255e32d0e882-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-cnw7p\" (UID: \"3de372b7-d84f-46f7-b773-255e32d0e882\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-cnw7p" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.728059 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/03fb8831-5c58-4bcf-8e9c-d18d0074be3e-config\") pod \"etcd-operator-b45778765-s5wfn\" (UID: \"03fb8831-5c58-4bcf-8e9c-d18d0074be3e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-s5wfn" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.728086 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/03781a39-3788-4e8a-9a0d-d97c3fb9e4b3-config\") pod \"kube-apiserver-operator-766d6c64bb-l7sqm\" (UID: \"03781a39-3788-4e8a-9a0d-d97c3fb9e4b3\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-l7sqm" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.728106 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-shllm\" (UniqueName: \"kubernetes.io/projected/31885971-a674-4f47-999b-d7e5435f34d0-kube-api-access-shllm\") pod \"machine-config-controller-84d6567774-hcztj\" (UID: \"31885971-a674-4f47-999b-d7e5435f34d0\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-hcztj" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.728144 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xq7bp\" (UniqueName: \"kubernetes.io/projected/9bf3f7ca-e9dc-4c5a-9d6d-7622e7c545ba-kube-api-access-xq7bp\") pod \"service-ca-operator-777779d784-44p4v\" (UID: \"9bf3f7ca-e9dc-4c5a-9d6d-7622e7c545ba\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-44p4v" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.728167 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/00c5c14d-d989-41a8-8447-c1a9c47426a2-config\") pod \"kube-controller-manager-operator-78b949d7b-zfzrm\" (UID: \"00c5c14d-d989-41a8-8447-c1a9c47426a2\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zfzrm" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.728191 4923 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31885971-a674-4f47-999b-d7e5435f34d0-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-hcztj\" (UID: \"31885971-a674-4f47-999b-d7e5435f34d0\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-hcztj" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.728207 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9bf3f7ca-e9dc-4c5a-9d6d-7622e7c545ba-serving-cert\") pod \"service-ca-operator-777779d784-44p4v\" (UID: \"9bf3f7ca-e9dc-4c5a-9d6d-7622e7c545ba\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-44p4v" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.728229 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ac7e8c4a-3957-4b3f-a6ce-968bb42f2a89-secret-volume\") pod \"collect-profiles-29405460-778cj\" (UID: \"ac7e8c4a-3957-4b3f-a6ce-968bb42f2a89\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405460-778cj" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.728234 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/14ef552e-14c1-49e6-b06d-0736e2a3ed73-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-dxdgg\" (UID: \"14ef552e-14c1-49e6-b06d-0736e2a3ed73\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-dxdgg" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.728319 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5wvsb\" (UniqueName: \"kubernetes.io/projected/a49569a7-dda0-4856-816e-296642ddbdff-kube-api-access-5wvsb\") pod \"router-default-5444994796-dfffg\" (UID: \"a49569a7-dda0-4856-816e-296642ddbdff\") " pod="openshift-ingress/router-default-5444994796-dfffg" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.728341 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/bc404fb9-c265-4265-84e8-e3dd111fae9a-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-lswhk\" (UID: \"bc404fb9-c265-4265-84e8-e3dd111fae9a\") " pod="openshift-marketplace/marketplace-operator-79b997595-lswhk" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.728363 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9bf3f7ca-e9dc-4c5a-9d6d-7622e7c545ba-config\") pod \"service-ca-operator-777779d784-44p4v\" (UID: \"9bf3f7ca-e9dc-4c5a-9d6d-7622e7c545ba\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-44p4v" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.728396 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/6f176857-50d2-41c7-8237-961e330c629d-bound-sa-token\") pod \"image-registry-697d97f7c8-g855d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " pod="openshift-image-registry/image-registry-697d97f7c8-g855d" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.728416 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-tmgnv\" (UniqueName: \"kubernetes.io/projected/6f176857-50d2-41c7-8237-961e330c629d-kube-api-access-tmgnv\") pod \"image-registry-697d97f7c8-g855d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " pod="openshift-image-registry/image-registry-697d97f7c8-g855d" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.728450 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/00c5c14d-d989-41a8-8447-c1a9c47426a2-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-zfzrm\" (UID: \"00c5c14d-d989-41a8-8447-c1a9c47426a2\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zfzrm" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.728471 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hmbjb\" (UniqueName: \"kubernetes.io/projected/74a3fc68-fb6b-4070-85ea-a4e70aa6406b-kube-api-access-hmbjb\") pod \"service-ca-9c57cc56f-k9zcz\" (UID: \"74a3fc68-fb6b-4070-85ea-a4e70aa6406b\") " pod="openshift-service-ca/service-ca-9c57cc56f-k9zcz" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.728514 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f8ec4511-563c-43b0-ad0d-b7916aeee89a-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-nmc8h\" (UID: \"f8ec4511-563c-43b0-ad0d-b7916aeee89a\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-nmc8h" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.728561 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/beb2a474-a9eb-4a5a-915b-003a2654a0d0-metrics-tls\") pod \"dns-default-m4cv2\" (UID: \"beb2a474-a9eb-4a5a-915b-003a2654a0d0\") " pod="openshift-dns/dns-default-m4cv2" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.728578 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jkd68\" (UniqueName: \"kubernetes.io/projected/ce29e4b8-83fb-402d-a969-efa9106fdf29-kube-api-access-jkd68\") pod \"csi-hostpathplugin-9x5dg\" (UID: \"ce29e4b8-83fb-402d-a969-efa9106fdf29\") " pod="hostpath-provisioner/csi-hostpathplugin-9x5dg" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.728594 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/d25bad00-52c9-449a-a73b-b53b4c4f2577-auth-proxy-config\") pod \"machine-config-operator-74547568cd-7j5p2\" (UID: \"d25bad00-52c9-449a-a73b-b53b4c4f2577\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-7j5p2" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.728625 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xlpgs\" (UniqueName: \"kubernetes.io/projected/0755523c-a607-4b3e-966e-cb31294dde65-kube-api-access-xlpgs\") pod \"cluster-image-registry-operator-dc59b4c8b-6t9g2\" (UID: \"0755523c-a607-4b3e-966e-cb31294dde65\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-6t9g2" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.728645 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hjxtn\" 
(UniqueName: \"kubernetes.io/projected/260bf9ee-f05f-4598-9bf9-de7ed2d1723f-kube-api-access-hjxtn\") pod \"olm-operator-6b444d44fb-kwrnm\" (UID: \"260bf9ee-f05f-4598-9bf9-de7ed2d1723f\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-kwrnm" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.728665 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/74a3fc68-fb6b-4070-85ea-a4e70aa6406b-signing-key\") pod \"service-ca-9c57cc56f-k9zcz\" (UID: \"74a3fc68-fb6b-4070-85ea-a4e70aa6406b\") " pod="openshift-service-ca/service-ca-9c57cc56f-k9zcz" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.729716 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a49569a7-dda0-4856-816e-296642ddbdff-service-ca-bundle\") pod \"router-default-5444994796-dfffg\" (UID: \"a49569a7-dda0-4856-816e-296642ddbdff\") " pod="openshift-ingress/router-default-5444994796-dfffg" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.727668 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.730561 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-phrq8\" (UniqueName: \"kubernetes.io/projected/889057c8-1eb2-4829-b1d5-a906b88eb68c-kube-api-access-phrq8\") pod \"cluster-samples-operator-665b6dd947-j6dnf\" (UID: \"889057c8-1eb2-4829-b1d5-a906b88eb68c\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-j6dnf" Nov 28 11:10:55 crc kubenswrapper[4923]: E1128 11:10:55.728913 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 11:10:56.228906743 +0000 UTC m=+135.357590953 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-g855d" (UID: "6f176857-50d2-41c7-8237-961e330c629d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.731715 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31885971-a674-4f47-999b-d7e5435f34d0-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-hcztj\" (UID: \"31885971-a674-4f47-999b-d7e5435f34d0\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-hcztj" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.731966 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/03781a39-3788-4e8a-9a0d-d97c3fb9e4b3-config\") pod \"kube-apiserver-operator-766d6c64bb-l7sqm\" (UID: \"03781a39-3788-4e8a-9a0d-d97c3fb9e4b3\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-l7sqm" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.732492 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f8ec4511-563c-43b0-ad0d-b7916aeee89a-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-nmc8h\" (UID: \"f8ec4511-563c-43b0-ad0d-b7916aeee89a\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-nmc8h" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.732896 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ed45dc51-25a9-47f1-b80b-de4288627e50-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-jhjvz\" (UID: \"ed45dc51-25a9-47f1-b80b-de4288627e50\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-jhjvz" Nov 28 11:10:55 crc kubenswrapper[4923]: E1128 11:10:55.737462 4923 projected.go:288] Couldn't get configMap openshift-route-controller-manager/kube-root-ca.crt: failed to sync configmap cache: timed out waiting for the condition Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.737941 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/6f176857-50d2-41c7-8237-961e330c629d-installation-pull-secrets\") pod \"image-registry-697d97f7c8-g855d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " pod="openshift-image-registry/image-registry-697d97f7c8-g855d" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.739366 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/6f176857-50d2-41c7-8237-961e330c629d-registry-certificates\") pod \"image-registry-697d97f7c8-g855d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " pod="openshift-image-registry/image-registry-697d97f7c8-g855d" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.740810 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/a49569a7-dda0-4856-816e-296642ddbdff-default-certificate\") pod 
\"router-default-5444994796-dfffg\" (UID: \"a49569a7-dda0-4856-816e-296642ddbdff\") " pod="openshift-ingress/router-default-5444994796-dfffg" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.741208 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/6f176857-50d2-41c7-8237-961e330c629d-ca-trust-extracted\") pod \"image-registry-697d97f7c8-g855d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " pod="openshift-image-registry/image-registry-697d97f7c8-g855d" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.744587 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bc404fb9-c265-4265-84e8-e3dd111fae9a-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-lswhk\" (UID: \"bc404fb9-c265-4265-84e8-e3dd111fae9a\") " pod="openshift-marketplace/marketplace-operator-79b997595-lswhk" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.748303 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f8ec4511-563c-43b0-ad0d-b7916aeee89a-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-nmc8h\" (UID: \"f8ec4511-563c-43b0-ad0d-b7916aeee89a\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-nmc8h" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.749597 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/bc404fb9-c265-4265-84e8-e3dd111fae9a-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-lswhk\" (UID: \"bc404fb9-c265-4265-84e8-e3dd111fae9a\") " pod="openshift-marketplace/marketplace-operator-79b997595-lswhk" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.750226 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.754522 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/260bf9ee-f05f-4598-9bf9-de7ed2d1723f-srv-cert\") pod \"olm-operator-6b444d44fb-kwrnm\" (UID: \"260bf9ee-f05f-4598-9bf9-de7ed2d1723f\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-kwrnm" Nov 28 11:10:55 crc kubenswrapper[4923]: E1128 11:10:55.754594 4923 projected.go:288] Couldn't get configMap openshift-config-operator/kube-root-ca.crt: failed to sync configmap cache: timed out waiting for the condition Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.758301 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/260bf9ee-f05f-4598-9bf9-de7ed2d1723f-profile-collector-cert\") pod \"olm-operator-6b444d44fb-kwrnm\" (UID: \"260bf9ee-f05f-4598-9bf9-de7ed2d1723f\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-kwrnm" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.766287 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/a49569a7-dda0-4856-816e-296642ddbdff-stats-auth\") pod \"router-default-5444994796-dfffg\" (UID: \"a49569a7-dda0-4856-816e-296642ddbdff\") " pod="openshift-ingress/router-default-5444994796-dfffg" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 
11:10:55.770387 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ed45dc51-25a9-47f1-b80b-de4288627e50-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-jhjvz\" (UID: \"ed45dc51-25a9-47f1-b80b-de4288627e50\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-jhjvz" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.770972 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8k6g4\" (UniqueName: \"kubernetes.io/projected/37ddbfb0-c042-460d-b772-9cdd214a79a1-kube-api-access-8k6g4\") pod \"apiserver-76f77b778f-lhbv8\" (UID: \"37ddbfb0-c042-460d-b772-9cdd214a79a1\") " pod="openshift-apiserver/apiserver-76f77b778f-lhbv8" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.771534 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/14ef552e-14c1-49e6-b06d-0736e2a3ed73-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-dxdgg\" (UID: \"14ef552e-14c1-49e6-b06d-0736e2a3ed73\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-dxdgg" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.771982 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/3a41216a-9d26-4691-aa6b-8a50c0a94016-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-f4dfh\" (UID: \"3a41216a-9d26-4691-aa6b-8a50c0a94016\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-f4dfh" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.772409 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/03781a39-3788-4e8a-9a0d-d97c3fb9e4b3-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-l7sqm\" (UID: \"03781a39-3788-4e8a-9a0d-d97c3fb9e4b3\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-l7sqm" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.774550 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/6f176857-50d2-41c7-8237-961e330c629d-registry-tls\") pod \"image-registry-697d97f7c8-g855d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " pod="openshift-image-registry/image-registry-697d97f7c8-g855d" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.775226 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31885971-a674-4f47-999b-d7e5435f34d0-proxy-tls\") pod \"machine-config-controller-84d6567774-hcztj\" (UID: \"31885971-a674-4f47-999b-d7e5435f34d0\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-hcztj" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.775528 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.780801 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a49569a7-dda0-4856-816e-296642ddbdff-metrics-certs\") pod \"router-default-5444994796-dfffg\" (UID: \"a49569a7-dda0-4856-816e-296642ddbdff\") " pod="openshift-ingress/router-default-5444994796-dfffg" Nov 28 11:10:55 
crc kubenswrapper[4923]: I1128 11:10:55.781792 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6f176857-50d2-41c7-8237-961e330c629d-trusted-ca\") pod \"image-registry-697d97f7c8-g855d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " pod="openshift-image-registry/image-registry-697d97f7c8-g855d" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.782017 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.784481 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/0755523c-a607-4b3e-966e-cb31294dde65-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-6t9g2\" (UID: \"0755523c-a607-4b3e-966e-cb31294dde65\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-6t9g2" Nov 28 11:10:55 crc kubenswrapper[4923]: E1128 11:10:55.788011 4923 projected.go:194] Error preparing data for projected volume kube-api-access-rvb8s for pod openshift-route-controller-manager/route-controller-manager-6576b87f9c-p7flx: failed to sync configmap cache: timed out waiting for the condition Nov 28 11:10:55 crc kubenswrapper[4923]: E1128 11:10:55.788085 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/cdedfd6e-9082-4411-b128-fc9806c67bd3-kube-api-access-rvb8s podName:cdedfd6e-9082-4411-b128-fc9806c67bd3 nodeName:}" failed. No retries permitted until 2025-11-28 11:10:56.288069016 +0000 UTC m=+135.416753226 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-rvb8s" (UniqueName: "kubernetes.io/projected/cdedfd6e-9082-4411-b128-fc9806c67bd3-kube-api-access-rvb8s") pod "route-controller-manager-6576b87f9c-p7flx" (UID: "cdedfd6e-9082-4411-b128-fc9806c67bd3") : failed to sync configmap cache: timed out waiting for the condition Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.800997 4923 request.go:700] Waited for 1.74466125s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-cluster-machine-approver/configmaps?fieldSelector=metadata.name%3Dmachine-approver-config&limit=500&resourceVersion=0 Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.801152 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-tz5lm" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.801856 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.825455 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.831622 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.831819 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p8dmq\" (UniqueName: \"kubernetes.io/projected/03fb8831-5c58-4bcf-8e9c-d18d0074be3e-kube-api-access-p8dmq\") pod \"etcd-operator-b45778765-s5wfn\" (UID: \"03fb8831-5c58-4bcf-8e9c-d18d0074be3e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-s5wfn" Nov 28 11:10:55 crc kubenswrapper[4923]: E1128 11:10:55.831908 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 11:10:56.331869794 +0000 UTC m=+135.460554004 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.831984 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m6rgf\" (UniqueName: \"kubernetes.io/projected/3de372b7-d84f-46f7-b773-255e32d0e882-kube-api-access-m6rgf\") pod \"package-server-manager-789f6589d5-cnw7p\" (UID: \"3de372b7-d84f-46f7-b773-255e32d0e882\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-cnw7p" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.832043 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/beb2a474-a9eb-4a5a-915b-003a2654a0d0-config-volume\") pod \"dns-default-m4cv2\" (UID: \"beb2a474-a9eb-4a5a-915b-003a2654a0d0\") " pod="openshift-dns/dns-default-m4cv2" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.832077 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p8ptf\" (UniqueName: \"kubernetes.io/projected/beb2a474-a9eb-4a5a-915b-003a2654a0d0-kube-api-access-p8ptf\") pod \"dns-default-m4cv2\" (UID: \"beb2a474-a9eb-4a5a-915b-003a2654a0d0\") " pod="openshift-dns/dns-default-m4cv2" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.832097 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: 
\"kubernetes.io/secret/c8ae2931-245c-49b5-9844-432bc6ecf3cc-node-bootstrap-token\") pod \"machine-config-server-czm5s\" (UID: \"c8ae2931-245c-49b5-9844-432bc6ecf3cc\") " pod="openshift-machine-config-operator/machine-config-server-czm5s" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.832117 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h675h\" (UniqueName: \"kubernetes.io/projected/c8ae2931-245c-49b5-9844-432bc6ecf3cc-kube-api-access-h675h\") pod \"machine-config-server-czm5s\" (UID: \"c8ae2931-245c-49b5-9844-432bc6ecf3cc\") " pod="openshift-machine-config-operator/machine-config-server-czm5s" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.832167 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mj9gd\" (UniqueName: \"kubernetes.io/projected/ac7e8c4a-3957-4b3f-a6ce-968bb42f2a89-kube-api-access-mj9gd\") pod \"collect-profiles-29405460-778cj\" (UID: \"ac7e8c4a-3957-4b3f-a6ce-968bb42f2a89\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405460-778cj" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.832183 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-42vrg\" (UniqueName: \"kubernetes.io/projected/4b125207-13c8-4142-86d1-99645442eddf-kube-api-access-42vrg\") pod \"multus-admission-controller-857f4d67dd-m6kq9\" (UID: \"4b125207-13c8-4142-86d1-99645442eddf\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-m6kq9" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.832199 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/ce29e4b8-83fb-402d-a969-efa9106fdf29-plugins-dir\") pod \"csi-hostpathplugin-9x5dg\" (UID: \"ce29e4b8-83fb-402d-a969-efa9106fdf29\") " pod="hostpath-provisioner/csi-hostpathplugin-9x5dg" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.832215 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/ce29e4b8-83fb-402d-a969-efa9106fdf29-csi-data-dir\") pod \"csi-hostpathplugin-9x5dg\" (UID: \"ce29e4b8-83fb-402d-a969-efa9106fdf29\") " pod="hostpath-provisioner/csi-hostpathplugin-9x5dg" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.832244 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3de372b7-d84f-46f7-b773-255e32d0e882-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-cnw7p\" (UID: \"3de372b7-d84f-46f7-b773-255e32d0e882\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-cnw7p" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.832260 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/03fb8831-5c58-4bcf-8e9c-d18d0074be3e-config\") pod \"etcd-operator-b45778765-s5wfn\" (UID: \"03fb8831-5c58-4bcf-8e9c-d18d0074be3e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-s5wfn" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.832286 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xq7bp\" (UniqueName: \"kubernetes.io/projected/9bf3f7ca-e9dc-4c5a-9d6d-7622e7c545ba-kube-api-access-xq7bp\") pod \"service-ca-operator-777779d784-44p4v\" (UID: \"9bf3f7ca-e9dc-4c5a-9d6d-7622e7c545ba\") " 
pod="openshift-service-ca-operator/service-ca-operator-777779d784-44p4v" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.832308 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/00c5c14d-d989-41a8-8447-c1a9c47426a2-config\") pod \"kube-controller-manager-operator-78b949d7b-zfzrm\" (UID: \"00c5c14d-d989-41a8-8447-c1a9c47426a2\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zfzrm" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.832331 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9bf3f7ca-e9dc-4c5a-9d6d-7622e7c545ba-serving-cert\") pod \"service-ca-operator-777779d784-44p4v\" (UID: \"9bf3f7ca-e9dc-4c5a-9d6d-7622e7c545ba\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-44p4v" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.832359 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ac7e8c4a-3957-4b3f-a6ce-968bb42f2a89-secret-volume\") pod \"collect-profiles-29405460-778cj\" (UID: \"ac7e8c4a-3957-4b3f-a6ce-968bb42f2a89\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405460-778cj" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.832408 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9bf3f7ca-e9dc-4c5a-9d6d-7622e7c545ba-config\") pod \"service-ca-operator-777779d784-44p4v\" (UID: \"9bf3f7ca-e9dc-4c5a-9d6d-7622e7c545ba\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-44p4v" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.832454 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/00c5c14d-d989-41a8-8447-c1a9c47426a2-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-zfzrm\" (UID: \"00c5c14d-d989-41a8-8447-c1a9c47426a2\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zfzrm" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.832473 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hmbjb\" (UniqueName: \"kubernetes.io/projected/74a3fc68-fb6b-4070-85ea-a4e70aa6406b-kube-api-access-hmbjb\") pod \"service-ca-9c57cc56f-k9zcz\" (UID: \"74a3fc68-fb6b-4070-85ea-a4e70aa6406b\") " pod="openshift-service-ca/service-ca-9c57cc56f-k9zcz" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.832494 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/beb2a474-a9eb-4a5a-915b-003a2654a0d0-metrics-tls\") pod \"dns-default-m4cv2\" (UID: \"beb2a474-a9eb-4a5a-915b-003a2654a0d0\") " pod="openshift-dns/dns-default-m4cv2" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.832510 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jkd68\" (UniqueName: \"kubernetes.io/projected/ce29e4b8-83fb-402d-a969-efa9106fdf29-kube-api-access-jkd68\") pod \"csi-hostpathplugin-9x5dg\" (UID: \"ce29e4b8-83fb-402d-a969-efa9106fdf29\") " pod="hostpath-provisioner/csi-hostpathplugin-9x5dg" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.832526 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/d25bad00-52c9-449a-a73b-b53b4c4f2577-auth-proxy-config\") pod \"machine-config-operator-74547568cd-7j5p2\" (UID: \"d25bad00-52c9-449a-a73b-b53b4c4f2577\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-7j5p2" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.832558 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/74a3fc68-fb6b-4070-85ea-a4e70aa6406b-signing-key\") pod \"service-ca-9c57cc56f-k9zcz\" (UID: \"74a3fc68-fb6b-4070-85ea-a4e70aa6406b\") " pod="openshift-service-ca/service-ca-9c57cc56f-k9zcz" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.832576 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/03fb8831-5c58-4bcf-8e9c-d18d0074be3e-serving-cert\") pod \"etcd-operator-b45778765-s5wfn\" (UID: \"03fb8831-5c58-4bcf-8e9c-d18d0074be3e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-s5wfn" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.832598 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/c8ae2931-245c-49b5-9844-432bc6ecf3cc-certs\") pod \"machine-config-server-czm5s\" (UID: \"c8ae2931-245c-49b5-9844-432bc6ecf3cc\") " pod="openshift-machine-config-operator/machine-config-server-czm5s" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.832620 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/d25bad00-52c9-449a-a73b-b53b4c4f2577-proxy-tls\") pod \"machine-config-operator-74547568cd-7j5p2\" (UID: \"d25bad00-52c9-449a-a73b-b53b4c4f2577\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-7j5p2" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.832644 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/ce29e4b8-83fb-402d-a969-efa9106fdf29-mountpoint-dir\") pod \"csi-hostpathplugin-9x5dg\" (UID: \"ce29e4b8-83fb-402d-a969-efa9106fdf29\") " pod="hostpath-provisioner/csi-hostpathplugin-9x5dg" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.832661 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ac7e8c4a-3957-4b3f-a6ce-968bb42f2a89-config-volume\") pod \"collect-profiles-29405460-778cj\" (UID: \"ac7e8c4a-3957-4b3f-a6ce-968bb42f2a89\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405460-778cj" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.832667 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/beb2a474-a9eb-4a5a-915b-003a2654a0d0-config-volume\") pod \"dns-default-m4cv2\" (UID: \"beb2a474-a9eb-4a5a-915b-003a2654a0d0\") " pod="openshift-dns/dns-default-m4cv2" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.832675 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/d25bad00-52c9-449a-a73b-b53b4c4f2577-images\") pod \"machine-config-operator-74547568cd-7j5p2\" (UID: \"d25bad00-52c9-449a-a73b-b53b4c4f2577\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-7j5p2" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.832749 
4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/03fb8831-5c58-4bcf-8e9c-d18d0074be3e-etcd-client\") pod \"etcd-operator-b45778765-s5wfn\" (UID: \"03fb8831-5c58-4bcf-8e9c-d18d0074be3e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-s5wfn" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.832804 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/4b125207-13c8-4142-86d1-99645442eddf-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-m6kq9\" (UID: \"4b125207-13c8-4142-86d1-99645442eddf\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-m6kq9" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.832820 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m7vrt\" (UniqueName: \"kubernetes.io/projected/641c9aa3-7da2-4122-9410-cd46d1733143-kube-api-access-m7vrt\") pod \"ingress-canary-8b2zb\" (UID: \"641c9aa3-7da2-4122-9410-cd46d1733143\") " pod="openshift-ingress-canary/ingress-canary-8b2zb" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.832835 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/74a3fc68-fb6b-4070-85ea-a4e70aa6406b-signing-cabundle\") pod \"service-ca-9c57cc56f-k9zcz\" (UID: \"74a3fc68-fb6b-4070-85ea-a4e70aa6406b\") " pod="openshift-service-ca/service-ca-9c57cc56f-k9zcz" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.832873 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/641c9aa3-7da2-4122-9410-cd46d1733143-cert\") pod \"ingress-canary-8b2zb\" (UID: \"641c9aa3-7da2-4122-9410-cd46d1733143\") " pod="openshift-ingress-canary/ingress-canary-8b2zb" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.832907 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-g855d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " pod="openshift-image-registry/image-registry-697d97f7c8-g855d" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.832967 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/00c5c14d-d989-41a8-8447-c1a9c47426a2-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-zfzrm\" (UID: \"00c5c14d-d989-41a8-8447-c1a9c47426a2\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zfzrm" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.832990 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/03fb8831-5c58-4bcf-8e9c-d18d0074be3e-etcd-ca\") pod \"etcd-operator-b45778765-s5wfn\" (UID: \"03fb8831-5c58-4bcf-8e9c-d18d0074be3e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-s5wfn" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.833014 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/ce29e4b8-83fb-402d-a969-efa9106fdf29-registration-dir\") pod \"csi-hostpathplugin-9x5dg\" (UID: 
\"ce29e4b8-83fb-402d-a969-efa9106fdf29\") " pod="hostpath-provisioner/csi-hostpathplugin-9x5dg" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.833046 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/03fb8831-5c58-4bcf-8e9c-d18d0074be3e-etcd-service-ca\") pod \"etcd-operator-b45778765-s5wfn\" (UID: \"03fb8831-5c58-4bcf-8e9c-d18d0074be3e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-s5wfn" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.833074 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lmzh7\" (UniqueName: \"kubernetes.io/projected/d25bad00-52c9-449a-a73b-b53b4c4f2577-kube-api-access-lmzh7\") pod \"machine-config-operator-74547568cd-7j5p2\" (UID: \"d25bad00-52c9-449a-a73b-b53b4c4f2577\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-7j5p2" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.833092 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/ce29e4b8-83fb-402d-a969-efa9106fdf29-socket-dir\") pod \"csi-hostpathplugin-9x5dg\" (UID: \"ce29e4b8-83fb-402d-a969-efa9106fdf29\") " pod="hostpath-provisioner/csi-hostpathplugin-9x5dg" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.833206 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/d25bad00-52c9-449a-a73b-b53b4c4f2577-images\") pod \"machine-config-operator-74547568cd-7j5p2\" (UID: \"d25bad00-52c9-449a-a73b-b53b4c4f2577\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-7j5p2" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.833339 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/ce29e4b8-83fb-402d-a969-efa9106fdf29-socket-dir\") pod \"csi-hostpathplugin-9x5dg\" (UID: \"ce29e4b8-83fb-402d-a969-efa9106fdf29\") " pod="hostpath-provisioner/csi-hostpathplugin-9x5dg" Nov 28 11:10:55 crc kubenswrapper[4923]: E1128 11:10:55.835894 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 11:10:56.335881628 +0000 UTC m=+135.464565838 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-g855d" (UID: "6f176857-50d2-41c7-8237-961e330c629d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.835884 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/ce29e4b8-83fb-402d-a969-efa9106fdf29-registration-dir\") pod \"csi-hostpathplugin-9x5dg\" (UID: \"ce29e4b8-83fb-402d-a969-efa9106fdf29\") " pod="hostpath-provisioner/csi-hostpathplugin-9x5dg" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.836574 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/c8ae2931-245c-49b5-9844-432bc6ecf3cc-node-bootstrap-token\") pod \"machine-config-server-czm5s\" (UID: \"c8ae2931-245c-49b5-9844-432bc6ecf3cc\") " pod="openshift-machine-config-operator/machine-config-server-czm5s" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.836749 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/ce29e4b8-83fb-402d-a969-efa9106fdf29-plugins-dir\") pod \"csi-hostpathplugin-9x5dg\" (UID: \"ce29e4b8-83fb-402d-a969-efa9106fdf29\") " pod="hostpath-provisioner/csi-hostpathplugin-9x5dg" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.836790 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/ce29e4b8-83fb-402d-a969-efa9106fdf29-csi-data-dir\") pod \"csi-hostpathplugin-9x5dg\" (UID: \"ce29e4b8-83fb-402d-a969-efa9106fdf29\") " pod="hostpath-provisioner/csi-hostpathplugin-9x5dg" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.838443 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/641c9aa3-7da2-4122-9410-cd46d1733143-cert\") pod \"ingress-canary-8b2zb\" (UID: \"641c9aa3-7da2-4122-9410-cd46d1733143\") " pod="openshift-ingress-canary/ingress-canary-8b2zb" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.838534 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/74a3fc68-fb6b-4070-85ea-a4e70aa6406b-signing-cabundle\") pod \"service-ca-9c57cc56f-k9zcz\" (UID: \"74a3fc68-fb6b-4070-85ea-a4e70aa6406b\") " pod="openshift-service-ca/service-ca-9c57cc56f-k9zcz" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.838560 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/03fb8831-5c58-4bcf-8e9c-d18d0074be3e-etcd-service-ca\") pod \"etcd-operator-b45778765-s5wfn\" (UID: \"03fb8831-5c58-4bcf-8e9c-d18d0074be3e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-s5wfn" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.839072 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/03fb8831-5c58-4bcf-8e9c-d18d0074be3e-etcd-ca\") pod \"etcd-operator-b45778765-s5wfn\" (UID: \"03fb8831-5c58-4bcf-8e9c-d18d0074be3e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-s5wfn" Nov 28 
11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.847177 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3de372b7-d84f-46f7-b773-255e32d0e882-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-cnw7p\" (UID: \"3de372b7-d84f-46f7-b773-255e32d0e882\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-cnw7p" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.850682 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/d25bad00-52c9-449a-a73b-b53b4c4f2577-auth-proxy-config\") pod \"machine-config-operator-74547568cd-7j5p2\" (UID: \"d25bad00-52c9-449a-a73b-b53b4c4f2577\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-7j5p2" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.851753 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/03fb8831-5c58-4bcf-8e9c-d18d0074be3e-config\") pod \"etcd-operator-b45778765-s5wfn\" (UID: \"03fb8831-5c58-4bcf-8e9c-d18d0074be3e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-s5wfn" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.852291 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/00c5c14d-d989-41a8-8447-c1a9c47426a2-config\") pod \"kube-controller-manager-operator-78b949d7b-zfzrm\" (UID: \"00c5c14d-d989-41a8-8447-c1a9c47426a2\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zfzrm" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.856977 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9bf3f7ca-e9dc-4c5a-9d6d-7622e7c545ba-config\") pod \"service-ca-operator-777779d784-44p4v\" (UID: \"9bf3f7ca-e9dc-4c5a-9d6d-7622e7c545ba\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-44p4v" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.858312 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ac7e8c4a-3957-4b3f-a6ce-968bb42f2a89-config-volume\") pod \"collect-profiles-29405460-778cj\" (UID: \"ac7e8c4a-3957-4b3f-a6ce-968bb42f2a89\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405460-778cj" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.860877 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.861166 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/ce29e4b8-83fb-402d-a969-efa9106fdf29-mountpoint-dir\") pod \"csi-hostpathplugin-9x5dg\" (UID: \"ce29e4b8-83fb-402d-a969-efa9106fdf29\") " pod="hostpath-provisioner/csi-hostpathplugin-9x5dg" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.861742 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ac7e8c4a-3957-4b3f-a6ce-968bb42f2a89-secret-volume\") pod \"collect-profiles-29405460-778cj\" (UID: \"ac7e8c4a-3957-4b3f-a6ce-968bb42f2a89\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405460-778cj" Nov 28 11:10:55 crc 
kubenswrapper[4923]: I1128 11:10:55.866582 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/03fb8831-5c58-4bcf-8e9c-d18d0074be3e-serving-cert\") pod \"etcd-operator-b45778765-s5wfn\" (UID: \"03fb8831-5c58-4bcf-8e9c-d18d0074be3e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-s5wfn" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.866665 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/74a3fc68-fb6b-4070-85ea-a4e70aa6406b-signing-key\") pod \"service-ca-9c57cc56f-k9zcz\" (UID: \"74a3fc68-fb6b-4070-85ea-a4e70aa6406b\") " pod="openshift-service-ca/service-ca-9c57cc56f-k9zcz" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.867183 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/c8ae2931-245c-49b5-9844-432bc6ecf3cc-certs\") pod \"machine-config-server-czm5s\" (UID: \"c8ae2931-245c-49b5-9844-432bc6ecf3cc\") " pod="openshift-machine-config-operator/machine-config-server-czm5s" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.867389 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/beb2a474-a9eb-4a5a-915b-003a2654a0d0-metrics-tls\") pod \"dns-default-m4cv2\" (UID: \"beb2a474-a9eb-4a5a-915b-003a2654a0d0\") " pod="openshift-dns/dns-default-m4cv2" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.867466 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/00c5c14d-d989-41a8-8447-c1a9c47426a2-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-zfzrm\" (UID: \"00c5c14d-d989-41a8-8447-c1a9c47426a2\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zfzrm" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.867940 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9bf3f7ca-e9dc-4c5a-9d6d-7622e7c545ba-serving-cert\") pod \"service-ca-operator-777779d784-44p4v\" (UID: \"9bf3f7ca-e9dc-4c5a-9d6d-7622e7c545ba\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-44p4v" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.868686 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/d25bad00-52c9-449a-a73b-b53b4c4f2577-proxy-tls\") pod \"machine-config-operator-74547568cd-7j5p2\" (UID: \"d25bad00-52c9-449a-a73b-b53b4c4f2577\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-7j5p2" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.869607 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.870497 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/4b125207-13c8-4142-86d1-99645442eddf-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-m6kq9\" (UID: \"4b125207-13c8-4142-86d1-99645442eddf\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-m6kq9" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.870909 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: 
\"kubernetes.io/secret/03fb8831-5c58-4bcf-8e9c-d18d0074be3e-etcd-client\") pod \"etcd-operator-b45778765-s5wfn\" (UID: \"03fb8831-5c58-4bcf-8e9c-d18d0074be3e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-s5wfn" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.887666 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Nov 28 11:10:55 crc kubenswrapper[4923]: E1128 11:10:55.887832 4923 projected.go:194] Error preparing data for projected volume kube-api-access-j2n7j for pod openshift-config-operator/openshift-config-operator-7777fb866f-mqbzf: failed to sync configmap cache: timed out waiting for the condition Nov 28 11:10:55 crc kubenswrapper[4923]: E1128 11:10:55.887879 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/e7f08880-f8db-4170-8d1d-1bccb2df10f4-kube-api-access-j2n7j podName:e7f08880-f8db-4170-8d1d-1bccb2df10f4 nodeName:}" failed. No retries permitted until 2025-11-28 11:10:56.387863368 +0000 UTC m=+135.516547578 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-j2n7j" (UniqueName: "kubernetes.io/projected/e7f08880-f8db-4170-8d1d-1bccb2df10f4-kube-api-access-j2n7j") pod "openshift-config-operator-7777fb866f-mqbzf" (UID: "e7f08880-f8db-4170-8d1d-1bccb2df10f4") : failed to sync configmap cache: timed out waiting for the condition Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.905315 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.923555 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.938359 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 11:10:55 crc kubenswrapper[4923]: E1128 11:10:55.939128 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 11:10:56.439109166 +0000 UTC m=+135.567793376 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.948183 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.971169 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Nov 28 11:10:55 crc kubenswrapper[4923]: I1128 11:10:55.985034 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.010419 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.046696 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.046853 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-g855d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " pod="openshift-image-registry/image-registry-697d97f7c8-g855d" Nov 28 11:10:56 crc kubenswrapper[4923]: E1128 11:10:56.047204 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 11:10:56.547192043 +0000 UTC m=+135.675876243 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-g855d" (UID: "6f176857-50d2-41c7-8237-961e330c629d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.050262 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.066686 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.088624 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.089340 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/0755523c-a607-4b3e-966e-cb31294dde65-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-6t9g2\" (UID: \"0755523c-a607-4b3e-966e-cb31294dde65\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-6t9g2" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.106149 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.109686 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6hvt9\" (UniqueName: \"kubernetes.io/projected/1b9cce3b-92fb-45f2-a81a-4a0a722ed13e-kube-api-access-6hvt9\") pod \"machine-approver-56656f9798-ckhbm\" (UID: \"1b9cce3b-92fb-45f2-a81a-4a0a722ed13e\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-ckhbm" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.149204 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 11:10:56 crc kubenswrapper[4923]: E1128 11:10:56.150042 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 11:10:56.65002605 +0000 UTC m=+135.778710260 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.168327 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/6f176857-50d2-41c7-8237-961e330c629d-bound-sa-token\") pod \"image-registry-697d97f7c8-g855d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " pod="openshift-image-registry/image-registry-697d97f7c8-g855d" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.180642 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tmgnv\" (UniqueName: \"kubernetes.io/projected/6f176857-50d2-41c7-8237-961e330c629d-kube-api-access-tmgnv\") pod \"image-registry-697d97f7c8-g855d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " pod="openshift-image-registry/image-registry-697d97f7c8-g855d" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.189171 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-tz5lm"] Nov 28 11:10:56 crc kubenswrapper[4923]: W1128 11:10:56.198377 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podabd99a1a_b8bf_44a5_82c4_a44a0c32a7ff.slice/crio-dc759613af0898de5daa4d32f5a3dd790f5d1eb6c5a8bf9e3b64674bce2684e2 WatchSource:0}: Error finding container dc759613af0898de5daa4d32f5a3dd790f5d1eb6c5a8bf9e3b64674bce2684e2: Status 404 returned error can't find the container with id dc759613af0898de5daa4d32f5a3dd790f5d1eb6c5a8bf9e3b64674bce2684e2 Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.206384 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tzcw8\" (UniqueName: \"kubernetes.io/projected/933d252a-8d35-415c-9e4e-754dd933be46-kube-api-access-tzcw8\") pod \"migrator-59844c95c7-8cknx\" (UID: \"933d252a-8d35-415c-9e4e-754dd933be46\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-8cknx" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.224896 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/0755523c-a607-4b3e-966e-cb31294dde65-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-6t9g2\" (UID: \"0755523c-a607-4b3e-966e-cb31294dde65\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-6t9g2" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.240963 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-8cknx" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.243389 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9lmqc\" (UniqueName: \"kubernetes.io/projected/f8ec4511-563c-43b0-ad0d-b7916aeee89a-kube-api-access-9lmqc\") pod \"kube-storage-version-migrator-operator-b67b599dd-nmc8h\" (UID: \"f8ec4511-563c-43b0-ad0d-b7916aeee89a\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-nmc8h" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.251422 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wwmwf\" (UniqueName: \"kubernetes.io/projected/aa97fc63-7e09-4217-9fb9-78fca4703f04-kube-api-access-wwmwf\") pod \"console-f9d7485db-2vsdg\" (UID: \"aa97fc63-7e09-4217-9fb9-78fca4703f04\") " pod="openshift-console/console-f9d7485db-2vsdg" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.251633 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-g855d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " pod="openshift-image-registry/image-registry-697d97f7c8-g855d" Nov 28 11:10:56 crc kubenswrapper[4923]: E1128 11:10:56.251883 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 11:10:56.75187271 +0000 UTC m=+135.880556920 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-g855d" (UID: "6f176857-50d2-41c7-8237-961e330c629d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.253885 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-nmc8h" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.263641 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kwlcz\" (UniqueName: \"kubernetes.io/projected/ed45dc51-25a9-47f1-b80b-de4288627e50-kube-api-access-kwlcz\") pod \"openshift-controller-manager-operator-756b6f6bc6-jhjvz\" (UID: \"ed45dc51-25a9-47f1-b80b-de4288627e50\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-jhjvz" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.264085 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wwmwf\" (UniqueName: \"kubernetes.io/projected/aa97fc63-7e09-4217-9fb9-78fca4703f04-kube-api-access-wwmwf\") pod \"console-f9d7485db-2vsdg\" (UID: \"aa97fc63-7e09-4217-9fb9-78fca4703f04\") " pod="openshift-console/console-f9d7485db-2vsdg" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.284455 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hjxtn\" (UniqueName: \"kubernetes.io/projected/260bf9ee-f05f-4598-9bf9-de7ed2d1723f-kube-api-access-hjxtn\") pod \"olm-operator-6b444d44fb-kwrnm\" (UID: \"260bf9ee-f05f-4598-9bf9-de7ed2d1723f\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-kwrnm" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.302849 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xlpgs\" (UniqueName: \"kubernetes.io/projected/0755523c-a607-4b3e-966e-cb31294dde65-kube-api-access-xlpgs\") pod \"cluster-image-registry-operator-dc59b4c8b-6t9g2\" (UID: \"0755523c-a607-4b3e-966e-cb31294dde65\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-6t9g2" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.319503 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5wvsb\" (UniqueName: \"kubernetes.io/projected/a49569a7-dda0-4856-816e-296642ddbdff-kube-api-access-5wvsb\") pod \"router-default-5444994796-dfffg\" (UID: \"a49569a7-dda0-4856-816e-296642ddbdff\") " pod="openshift-ingress/router-default-5444994796-dfffg" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.342669 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pzgn4\" (UniqueName: \"kubernetes.io/projected/3a41216a-9d26-4691-aa6b-8a50c0a94016-kube-api-access-pzgn4\") pod \"control-plane-machine-set-operator-78cbb6b69f-f4dfh\" (UID: \"3a41216a-9d26-4691-aa6b-8a50c0a94016\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-f4dfh" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.355649 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 11:10:56 crc kubenswrapper[4923]: E1128 11:10:56.356129 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 11:10:56.856074646 +0000 UTC m=+135.984758856 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.356424 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/37ddbfb0-c042-460d-b772-9cdd214a79a1-config\") pod \"apiserver-76f77b778f-lhbv8\" (UID: \"37ddbfb0-c042-460d-b772-9cdd214a79a1\") " pod="openshift-apiserver/apiserver-76f77b778f-lhbv8" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.356505 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/37ddbfb0-c042-460d-b772-9cdd214a79a1-etcd-serving-ca\") pod \"apiserver-76f77b778f-lhbv8\" (UID: \"37ddbfb0-c042-460d-b772-9cdd214a79a1\") " pod="openshift-apiserver/apiserver-76f77b778f-lhbv8" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.356570 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/37ddbfb0-c042-460d-b772-9cdd214a79a1-etcd-client\") pod \"apiserver-76f77b778f-lhbv8\" (UID: \"37ddbfb0-c042-460d-b772-9cdd214a79a1\") " pod="openshift-apiserver/apiserver-76f77b778f-lhbv8" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.357468 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/37ddbfb0-c042-460d-b772-9cdd214a79a1-image-import-ca\") pod \"apiserver-76f77b778f-lhbv8\" (UID: \"37ddbfb0-c042-460d-b772-9cdd214a79a1\") " pod="openshift-apiserver/apiserver-76f77b778f-lhbv8" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.357509 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rvb8s\" (UniqueName: \"kubernetes.io/projected/cdedfd6e-9082-4411-b128-fc9806c67bd3-kube-api-access-rvb8s\") pod \"route-controller-manager-6576b87f9c-p7flx\" (UID: \"cdedfd6e-9082-4411-b128-fc9806c67bd3\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-p7flx" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.357587 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/37ddbfb0-c042-460d-b772-9cdd214a79a1-trusted-ca-bundle\") pod \"apiserver-76f77b778f-lhbv8\" (UID: \"37ddbfb0-c042-460d-b772-9cdd214a79a1\") " pod="openshift-apiserver/apiserver-76f77b778f-lhbv8" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.357612 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-g855d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " pod="openshift-image-registry/image-registry-697d97f7c8-g855d" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.357671 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: 
\"kubernetes.io/secret/37ddbfb0-c042-460d-b772-9cdd214a79a1-encryption-config\") pod \"apiserver-76f77b778f-lhbv8\" (UID: \"37ddbfb0-c042-460d-b772-9cdd214a79a1\") " pod="openshift-apiserver/apiserver-76f77b778f-lhbv8" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.358173 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/37ddbfb0-c042-460d-b772-9cdd214a79a1-etcd-serving-ca\") pod \"apiserver-76f77b778f-lhbv8\" (UID: \"37ddbfb0-c042-460d-b772-9cdd214a79a1\") " pod="openshift-apiserver/apiserver-76f77b778f-lhbv8" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.358177 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/37ddbfb0-c042-460d-b772-9cdd214a79a1-config\") pod \"apiserver-76f77b778f-lhbv8\" (UID: \"37ddbfb0-c042-460d-b772-9cdd214a79a1\") " pod="openshift-apiserver/apiserver-76f77b778f-lhbv8" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.359102 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/37ddbfb0-c042-460d-b772-9cdd214a79a1-image-import-ca\") pod \"apiserver-76f77b778f-lhbv8\" (UID: \"37ddbfb0-c042-460d-b772-9cdd214a79a1\") " pod="openshift-apiserver/apiserver-76f77b778f-lhbv8" Nov 28 11:10:56 crc kubenswrapper[4923]: E1128 11:10:56.359392 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 11:10:56.85937943 +0000 UTC m=+135.988063640 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-g855d" (UID: "6f176857-50d2-41c7-8237-961e330c629d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.362820 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/37ddbfb0-c042-460d-b772-9cdd214a79a1-encryption-config\") pod \"apiserver-76f77b778f-lhbv8\" (UID: \"37ddbfb0-c042-460d-b772-9cdd214a79a1\") " pod="openshift-apiserver/apiserver-76f77b778f-lhbv8" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.365658 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/37ddbfb0-c042-460d-b772-9cdd214a79a1-etcd-client\") pod \"apiserver-76f77b778f-lhbv8\" (UID: \"37ddbfb0-c042-460d-b772-9cdd214a79a1\") " pod="openshift-apiserver/apiserver-76f77b778f-lhbv8" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.367306 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rvb8s\" (UniqueName: \"kubernetes.io/projected/cdedfd6e-9082-4411-b128-fc9806c67bd3-kube-api-access-rvb8s\") pod \"route-controller-manager-6576b87f9c-p7flx\" (UID: \"cdedfd6e-9082-4411-b128-fc9806c67bd3\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-p7flx" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.376315 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/37ddbfb0-c042-460d-b772-9cdd214a79a1-trusted-ca-bundle\") pod \"apiserver-76f77b778f-lhbv8\" (UID: \"37ddbfb0-c042-460d-b772-9cdd214a79a1\") " pod="openshift-apiserver/apiserver-76f77b778f-lhbv8" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.377526 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/14ef552e-14c1-49e6-b06d-0736e2a3ed73-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-dxdgg\" (UID: \"14ef552e-14c1-49e6-b06d-0736e2a3ed73\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-dxdgg" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.388632 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-shllm\" (UniqueName: \"kubernetes.io/projected/31885971-a674-4f47-999b-d7e5435f34d0-kube-api-access-shllm\") pod \"machine-config-controller-84d6567774-hcztj\" (UID: \"31885971-a674-4f47-999b-d7e5435f34d0\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-hcztj" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.408967 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/03781a39-3788-4e8a-9a0d-d97c3fb9e4b3-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-l7sqm\" (UID: \"03781a39-3788-4e8a-9a0d-d97c3fb9e4b3\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-l7sqm" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.423648 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kw89r\" (UniqueName: \"kubernetes.io/projected/7638a2ec-d85c-456d-9d1b-9e56d83eae4b-kube-api-access-kw89r\") pod \"downloads-7954f5f757-fd2jt\" (UID: \"7638a2ec-d85c-456d-9d1b-9e56d83eae4b\") " pod="openshift-console/downloads-7954f5f757-fd2jt" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.438037 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-6t9g2" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.443863 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ll6pl\" (UniqueName: \"kubernetes.io/projected/bc404fb9-c265-4265-84e8-e3dd111fae9a-kube-api-access-ll6pl\") pod \"marketplace-operator-79b997595-lswhk\" (UID: \"bc404fb9-c265-4265-84e8-e3dd111fae9a\") " pod="openshift-marketplace/marketplace-operator-79b997595-lswhk" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.460791 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.460982 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4926f4a2-0ee6-444b-a113-f6ee1d162d72-config\") pod \"authentication-operator-69f744f599-cbtlt\" (UID: \"4926f4a2-0ee6-444b-a113-f6ee1d162d72\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-cbtlt" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.461013 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/cdedfd6e-9082-4411-b128-fc9806c67bd3-client-ca\") pod \"route-controller-manager-6576b87f9c-p7flx\" (UID: \"cdedfd6e-9082-4411-b128-fc9806c67bd3\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-p7flx" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.461065 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/aa97fc63-7e09-4217-9fb9-78fca4703f04-service-ca\") pod \"console-f9d7485db-2vsdg\" (UID: \"aa97fc63-7e09-4217-9fb9-78fca4703f04\") " pod="openshift-console/console-f9d7485db-2vsdg" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.461087 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j2n7j\" (UniqueName: \"kubernetes.io/projected/e7f08880-f8db-4170-8d1d-1bccb2df10f4-kube-api-access-j2n7j\") pod \"openshift-config-operator-7777fb866f-mqbzf\" (UID: \"e7f08880-f8db-4170-8d1d-1bccb2df10f4\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-mqbzf" Nov 28 11:10:56 crc kubenswrapper[4923]: E1128 11:10:56.461119 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 11:10:56.961094336 +0000 UTC m=+136.089778546 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.461162 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cdedfd6e-9082-4411-b128-fc9806c67bd3-config\") pod \"route-controller-manager-6576b87f9c-p7flx\" (UID: \"cdedfd6e-9082-4411-b128-fc9806c67bd3\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-p7flx" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.461235 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7f08880-f8db-4170-8d1d-1bccb2df10f4-serving-cert\") pod \"openshift-config-operator-7777fb866f-mqbzf\" (UID: \"e7f08880-f8db-4170-8d1d-1bccb2df10f4\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-mqbzf" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.461299 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/aa97fc63-7e09-4217-9fb9-78fca4703f04-console-oauth-config\") pod \"console-f9d7485db-2vsdg\" (UID: \"aa97fc63-7e09-4217-9fb9-78fca4703f04\") " pod="openshift-console/console-f9d7485db-2vsdg" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.461326 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/aa97fc63-7e09-4217-9fb9-78fca4703f04-trusted-ca-bundle\") pod \"console-f9d7485db-2vsdg\" (UID: \"aa97fc63-7e09-4217-9fb9-78fca4703f04\") " pod="openshift-console/console-f9d7485db-2vsdg" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.461431 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/889057c8-1eb2-4829-b1d5-a906b88eb68c-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-j6dnf\" (UID: \"889057c8-1eb2-4829-b1d5-a906b88eb68c\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-j6dnf" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.461516 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-g855d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " pod="openshift-image-registry/image-registry-697d97f7c8-g855d" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.461535 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4926f4a2-0ee6-444b-a113-f6ee1d162d72-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-cbtlt\" (UID: \"4926f4a2-0ee6-444b-a113-f6ee1d162d72\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-cbtlt" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.462521 4923 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4926f4a2-0ee6-444b-a113-f6ee1d162d72-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-cbtlt\" (UID: \"4926f4a2-0ee6-444b-a113-f6ee1d162d72\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-cbtlt" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.463319 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cdedfd6e-9082-4411-b128-fc9806c67bd3-config\") pod \"route-controller-manager-6576b87f9c-p7flx\" (UID: \"cdedfd6e-9082-4411-b128-fc9806c67bd3\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-p7flx" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.467365 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j2n7j\" (UniqueName: \"kubernetes.io/projected/e7f08880-f8db-4170-8d1d-1bccb2df10f4-kube-api-access-j2n7j\") pod \"openshift-config-operator-7777fb866f-mqbzf\" (UID: \"e7f08880-f8db-4170-8d1d-1bccb2df10f4\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-mqbzf" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.467781 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4926f4a2-0ee6-444b-a113-f6ee1d162d72-config\") pod \"authentication-operator-69f744f599-cbtlt\" (UID: \"4926f4a2-0ee6-444b-a113-f6ee1d162d72\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-cbtlt" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.468297 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/cdedfd6e-9082-4411-b128-fc9806c67bd3-client-ca\") pod \"route-controller-manager-6576b87f9c-p7flx\" (UID: \"cdedfd6e-9082-4411-b128-fc9806c67bd3\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-p7flx" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.468766 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/aa97fc63-7e09-4217-9fb9-78fca4703f04-service-ca\") pod \"console-f9d7485db-2vsdg\" (UID: \"aa97fc63-7e09-4217-9fb9-78fca4703f04\") " pod="openshift-console/console-f9d7485db-2vsdg" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.469312 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7f08880-f8db-4170-8d1d-1bccb2df10f4-serving-cert\") pod \"openshift-config-operator-7777fb866f-mqbzf\" (UID: \"e7f08880-f8db-4170-8d1d-1bccb2df10f4\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-mqbzf" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.470449 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/aa97fc63-7e09-4217-9fb9-78fca4703f04-trusted-ca-bundle\") pod \"console-f9d7485db-2vsdg\" (UID: \"aa97fc63-7e09-4217-9fb9-78fca4703f04\") " pod="openshift-console/console-f9d7485db-2vsdg" Nov 28 11:10:56 crc kubenswrapper[4923]: E1128 11:10:56.470716 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 11:10:56.970701517 +0000 UTC m=+136.099385727 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-g855d" (UID: "6f176857-50d2-41c7-8237-961e330c629d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.471132 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p8dmq\" (UniqueName: \"kubernetes.io/projected/03fb8831-5c58-4bcf-8e9c-d18d0074be3e-kube-api-access-p8dmq\") pod \"etcd-operator-b45778765-s5wfn\" (UID: \"03fb8831-5c58-4bcf-8e9c-d18d0074be3e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-s5wfn" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.478519 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/889057c8-1eb2-4829-b1d5-a906b88eb68c-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-j6dnf\" (UID: \"889057c8-1eb2-4829-b1d5-a906b88eb68c\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-j6dnf" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.479893 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-jhjvz" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.481642 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/aa97fc63-7e09-4217-9fb9-78fca4703f04-console-oauth-config\") pod \"console-f9d7485db-2vsdg\" (UID: \"aa97fc63-7e09-4217-9fb9-78fca4703f04\") " pod="openshift-console/console-f9d7485db-2vsdg" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.492642 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-dfffg" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.502317 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-lhbv8" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.518488 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-kwrnm" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.521101 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-tz5lm" event={"ID":"abd99a1a-b8bf-44a5-82c4-a44a0c32a7ff","Type":"ContainerStarted","Data":"30c6b1dc55f14178f4fd2de894ec2a6d4907a9e0147276b529f521330f17808c"} Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.521141 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-tz5lm" event={"ID":"abd99a1a-b8bf-44a5-82c4-a44a0c32a7ff","Type":"ContainerStarted","Data":"dc759613af0898de5daa4d32f5a3dd790f5d1eb6c5a8bf9e3b64674bce2684e2"} Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.521783 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-tz5lm" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.521887 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-f4dfh" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.524339 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p8ptf\" (UniqueName: \"kubernetes.io/projected/beb2a474-a9eb-4a5a-915b-003a2654a0d0-kube-api-access-p8ptf\") pod \"dns-default-m4cv2\" (UID: \"beb2a474-a9eb-4a5a-915b-003a2654a0d0\") " pod="openshift-dns/dns-default-m4cv2" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.526424 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m6rgf\" (UniqueName: \"kubernetes.io/projected/3de372b7-d84f-46f7-b773-255e32d0e882-kube-api-access-m6rgf\") pod \"package-server-manager-789f6589d5-cnw7p\" (UID: \"3de372b7-d84f-46f7-b773-255e32d0e882\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-cnw7p" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.539549 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-l7sqm" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.539971 4923 patch_prober.go:28] interesting pod/console-operator-58897d9998-tz5lm container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.26:8443/readyz\": dial tcp 10.217.0.26:8443: connect: connection refused" start-of-body= Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.540001 4923 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-tz5lm" podUID="abd99a1a-b8bf-44a5-82c4-a44a0c32a7ff" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.26:8443/readyz\": dial tcp 10.217.0.26:8443: connect: connection refused" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.543567 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h675h\" (UniqueName: \"kubernetes.io/projected/c8ae2931-245c-49b5-9844-432bc6ecf3cc-kube-api-access-h675h\") pod \"machine-config-server-czm5s\" (UID: \"c8ae2931-245c-49b5-9844-432bc6ecf3cc\") " pod="openshift-machine-config-operator/machine-config-server-czm5s" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.544682 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mj9gd\" (UniqueName: \"kubernetes.io/projected/ac7e8c4a-3957-4b3f-a6ce-968bb42f2a89-kube-api-access-mj9gd\") pod \"collect-profiles-29405460-778cj\" (UID: \"ac7e8c4a-3957-4b3f-a6ce-968bb42f2a89\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405460-778cj" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.545992 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-lswhk" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.550800 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-52xfd" event={"ID":"c948b544-79ae-4148-8483-ef898d1b6663","Type":"ContainerStarted","Data":"8c186edbe78a655972aa863d3ee32383bf0ab4dac51dc18881562e018982ca84"} Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.550840 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-52xfd" event={"ID":"c948b544-79ae-4148-8483-ef898d1b6663","Type":"ContainerStarted","Data":"a1b41576b716e3f27b4a444745958cdb2487e6e1d5465e5fe0527f640e316363"} Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.550851 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-52xfd" event={"ID":"c948b544-79ae-4148-8483-ef898d1b6663","Type":"ContainerStarted","Data":"cc53ab8de2ebf9b9ec052dd5cd1f21f1ce8d29b277b6ea600767cd494b75546c"} Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.556725 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-j6dnf" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.562059 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-42vrg\" (UniqueName: \"kubernetes.io/projected/4b125207-13c8-4142-86d1-99645442eddf-kube-api-access-42vrg\") pod \"multus-admission-controller-857f4d67dd-m6kq9\" (UID: \"4b125207-13c8-4142-86d1-99645442eddf\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-m6kq9" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.562208 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-dxdgg" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.562678 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.562794 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/1b9cce3b-92fb-45f2-a81a-4a0a722ed13e-auth-proxy-config\") pod \"machine-approver-56656f9798-ckhbm\" (UID: \"1b9cce3b-92fb-45f2-a81a-4a0a722ed13e\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-ckhbm" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.563977 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/1b9cce3b-92fb-45f2-a81a-4a0a722ed13e-machine-approver-tls\") pod \"machine-approver-56656f9798-ckhbm\" (UID: \"1b9cce3b-92fb-45f2-a81a-4a0a722ed13e\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-ckhbm" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.564018 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1b9cce3b-92fb-45f2-a81a-4a0a722ed13e-config\") pod \"machine-approver-56656f9798-ckhbm\" (UID: \"1b9cce3b-92fb-45f2-a81a-4a0a722ed13e\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-ckhbm" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.564415 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1b9cce3b-92fb-45f2-a81a-4a0a722ed13e-config\") pod \"machine-approver-56656f9798-ckhbm\" (UID: \"1b9cce3b-92fb-45f2-a81a-4a0a722ed13e\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-ckhbm" Nov 28 11:10:56 crc kubenswrapper[4923]: E1128 11:10:56.564602 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 11:10:57.064583842 +0000 UTC m=+136.193268052 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.565017 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/1b9cce3b-92fb-45f2-a81a-4a0a722ed13e-auth-proxy-config\") pod \"machine-approver-56656f9798-ckhbm\" (UID: \"1b9cce3b-92fb-45f2-a81a-4a0a722ed13e\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-ckhbm" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.565746 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-28cv6" event={"ID":"ad44a891-fc97-4154-8f93-bbd276c5c18a","Type":"ContainerStarted","Data":"e1944c393d47c7de969ae5dc55ca1f3f4d7282cf8018157344ba21b62dcbfef7"} Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.565784 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-28cv6" event={"ID":"ad44a891-fc97-4154-8f93-bbd276c5c18a","Type":"ContainerStarted","Data":"8f8f69906629ca0b0b366dcfd7cd8144a8f6360bde5e04bf377a4813c6416c90"} Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.566073 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-28cv6" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.566324 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-hcztj" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.569171 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-cbtlt" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.573179 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405460-778cj" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.576365 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/1b9cce3b-92fb-45f2-a81a-4a0a722ed13e-machine-approver-tls\") pod \"machine-approver-56656f9798-ckhbm\" (UID: \"1b9cce3b-92fb-45f2-a81a-4a0a722ed13e\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-ckhbm" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.581837 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lmzh7\" (UniqueName: \"kubernetes.io/projected/d25bad00-52c9-449a-a73b-b53b4c4f2577-kube-api-access-lmzh7\") pod \"machine-config-operator-74547568cd-7j5p2\" (UID: \"d25bad00-52c9-449a-a73b-b53b4c4f2577\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-7j5p2" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.584126 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-2vsdg" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.598154 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-m6kq9" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.598171 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-mqbzf" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.601231 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6vdll" event={"ID":"54e50da8-3e06-4de6-88c0-cfe151b794ca","Type":"ContainerStarted","Data":"f054b1b6686c0dfde736312c9c1a54594bfe2d0ceee0d7598d5c961e6736d748"} Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.601260 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6vdll" event={"ID":"54e50da8-3e06-4de6-88c0-cfe151b794ca","Type":"ContainerStarted","Data":"8a56bf2b097705901b4d8f75ed83af978c51aa03119d23555dad83b38f2fdbfa"} Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.601787 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6vdll" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.602967 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m7vrt\" (UniqueName: \"kubernetes.io/projected/641c9aa3-7da2-4122-9410-cd46d1733143-kube-api-access-m7vrt\") pod \"ingress-canary-8b2zb\" (UID: \"641c9aa3-7da2-4122-9410-cd46d1733143\") " pod="openshift-ingress-canary/ingress-canary-8b2zb" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.604390 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hfvn9" event={"ID":"05bdba74-09ab-4d1c-9742-c842abf6c9f6","Type":"ContainerStarted","Data":"0ab95ff03fe4106947a95e4a5c18d7940c3e1c9605d331d02e8dbf2bbe2fc1fe"} Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.604432 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hfvn9" event={"ID":"05bdba74-09ab-4d1c-9742-c842abf6c9f6","Type":"ContainerStarted","Data":"65ea30f8a9c310b6bb20771fc52d0ebf6d454e62474e028d360199a7d5d43cc9"} Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.605816 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-s5wfn" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.606423 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hfvn9" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.612202 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-cnw7p" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.640976 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-w99dl" event={"ID":"45d0a5e3-828c-43e0-a609-ac7ae08d57af","Type":"ContainerStarted","Data":"ef6d9f32d3ec5dcc1c140259243a0aeac4b916b031da1c653ea3b6f637b6580a"} Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.641012 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-w99dl" event={"ID":"45d0a5e3-828c-43e0-a609-ac7ae08d57af","Type":"ContainerStarted","Data":"b76c0cc7787006622904a0960092d23e89c76f446c1e2f4155b1a3d1325a2f40"} Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.641023 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-w99dl" event={"ID":"45d0a5e3-828c-43e0-a609-ac7ae08d57af","Type":"ContainerStarted","Data":"c7b8cf96433320d2f97842d1c4c1444bccc6974b43665de203b3af7ac9463b65"} Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.643097 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-m4cv2" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.643560 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-p7flx" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.651278 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-czm5s" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.652006 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jkd68\" (UniqueName: \"kubernetes.io/projected/ce29e4b8-83fb-402d-a969-efa9106fdf29-kube-api-access-jkd68\") pod \"csi-hostpathplugin-9x5dg\" (UID: \"ce29e4b8-83fb-402d-a969-efa9106fdf29\") " pod="hostpath-provisioner/csi-hostpathplugin-9x5dg" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.658872 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xq7bp\" (UniqueName: \"kubernetes.io/projected/9bf3f7ca-e9dc-4c5a-9d6d-7622e7c545ba-kube-api-access-xq7bp\") pod \"service-ca-operator-777779d784-44p4v\" (UID: \"9bf3f7ca-e9dc-4c5a-9d6d-7622e7c545ba\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-44p4v" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.667195 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-g855d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " pod="openshift-image-registry/image-registry-697d97f7c8-g855d" Nov 28 11:10:56 crc kubenswrapper[4923]: E1128 11:10:56.674870 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 11:10:57.1748537 +0000 UTC m=+136.303537910 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-g855d" (UID: "6f176857-50d2-41c7-8237-961e330c629d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.683045 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/00c5c14d-d989-41a8-8447-c1a9c47426a2-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-zfzrm\" (UID: \"00c5c14d-d989-41a8-8447-c1a9c47426a2\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zfzrm" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.701567 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hmbjb\" (UniqueName: \"kubernetes.io/projected/74a3fc68-fb6b-4070-85ea-a4e70aa6406b-kube-api-access-hmbjb\") pod \"service-ca-9c57cc56f-k9zcz\" (UID: \"74a3fc68-fb6b-4070-85ea-a4e70aa6406b\") " pod="openshift-service-ca/service-ca-9c57cc56f-k9zcz" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.712193 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-fd2jt" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.724990 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hfvn9" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.731557 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-ckhbm" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.772410 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 11:10:56 crc kubenswrapper[4923]: E1128 11:10:56.772546 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 11:10:57.272525092 +0000 UTC m=+136.401209302 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.772683 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-g855d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " pod="openshift-image-registry/image-registry-697d97f7c8-g855d" Nov 28 11:10:56 crc kubenswrapper[4923]: E1128 11:10:56.774751 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 11:10:57.274737464 +0000 UTC m=+136.403421674 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-g855d" (UID: "6f176857-50d2-41c7-8237-961e330c629d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.812777 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-8b2zb" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.830694 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-nmc8h"] Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.852484 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-8cknx"] Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.883399 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-7j5p2" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.883976 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 11:10:56 crc kubenswrapper[4923]: E1128 11:10:56.884306 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 11:10:57.384292682 +0000 UTC m=+136.512976892 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.889404 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-44p4v" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.916462 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zfzrm" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.920307 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-k9zcz" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.921401 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-7l2lz" podStartSLOduration=116.921384671 podStartE2EDuration="1m56.921384671s" podCreationTimestamp="2025-11-28 11:09:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:10:56.883126579 +0000 UTC m=+136.011810789" watchObservedRunningTime="2025-11-28 11:10:56.921384671 +0000 UTC m=+136.050068881" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.938132 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-9x5dg" Nov 28 11:10:56 crc kubenswrapper[4923]: I1128 11:10:56.984852 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-g855d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " pod="openshift-image-registry/image-registry-697d97f7c8-g855d" Nov 28 11:10:56 crc kubenswrapper[4923]: E1128 11:10:56.985138 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 11:10:57.485127753 +0000 UTC m=+136.613811963 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-g855d" (UID: "6f176857-50d2-41c7-8237-961e330c629d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 11:10:56 crc kubenswrapper[4923]: W1128 11:10:56.987301 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf8ec4511_563c_43b0_ad0d_b7916aeee89a.slice/crio-ec23108ac293c440718a798d4a8fd0c15e19ac10b71cec0d43ed37158c9b539c WatchSource:0}: Error finding container ec23108ac293c440718a798d4a8fd0c15e19ac10b71cec0d43ed37158c9b539c: Status 404 returned error can't find the container with id ec23108ac293c440718a798d4a8fd0c15e19ac10b71cec0d43ed37158c9b539c Nov 28 11:10:57 crc kubenswrapper[4923]: I1128 11:10:57.035807 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-28cv6" podStartSLOduration=118.035790276 podStartE2EDuration="1m58.035790276s" podCreationTimestamp="2025-11-28 11:08:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:10:57.034796967 +0000 UTC m=+136.163481177" watchObservedRunningTime="2025-11-28 11:10:57.035790276 +0000 UTC m=+136.164474486" Nov 28 11:10:57 crc kubenswrapper[4923]: I1128 11:10:57.048242 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6vdll" Nov 28 11:10:57 crc kubenswrapper[4923]: I1128 11:10:57.093421 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 11:10:57 crc kubenswrapper[4923]: E1128 11:10:57.093811 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 11:10:57.593796536 +0000 UTC m=+136.722480746 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 11:10:57 crc kubenswrapper[4923]: I1128 11:10:57.095494 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-28cv6" Nov 28 11:10:57 crc kubenswrapper[4923]: I1128 11:10:57.131018 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-52xfd" podStartSLOduration=117.131004228 podStartE2EDuration="1m57.131004228s" podCreationTimestamp="2025-11-28 11:09:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:10:57.130291568 +0000 UTC m=+136.258975778" watchObservedRunningTime="2025-11-28 11:10:57.131004228 +0000 UTC m=+136.259688438" Nov 28 11:10:57 crc kubenswrapper[4923]: I1128 11:10:57.195690 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-g855d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " pod="openshift-image-registry/image-registry-697d97f7c8-g855d" Nov 28 11:10:57 crc kubenswrapper[4923]: E1128 11:10:57.196225 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 11:10:57.696213022 +0000 UTC m=+136.824897232 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-g855d" (UID: "6f176857-50d2-41c7-8237-961e330c629d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 11:10:57 crc kubenswrapper[4923]: I1128 11:10:57.252684 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-6t9g2"] Nov 28 11:10:57 crc kubenswrapper[4923]: I1128 11:10:57.301908 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 11:10:57 crc kubenswrapper[4923]: E1128 11:10:57.302236 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 11:10:57.802218989 +0000 UTC m=+136.930903199 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 11:10:57 crc kubenswrapper[4923]: I1128 11:10:57.410094 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-g855d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " pod="openshift-image-registry/image-registry-697d97f7c8-g855d" Nov 28 11:10:57 crc kubenswrapper[4923]: E1128 11:10:57.410361 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 11:10:57.910351536 +0000 UTC m=+137.039035746 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-g855d" (UID: "6f176857-50d2-41c7-8237-961e330c629d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 11:10:57 crc kubenswrapper[4923]: I1128 11:10:57.423163 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-tz5lm" podStartSLOduration=118.423149168 podStartE2EDuration="1m58.423149168s" podCreationTimestamp="2025-11-28 11:08:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:10:57.422399857 +0000 UTC m=+136.551084067" watchObservedRunningTime="2025-11-28 11:10:57.423149168 +0000 UTC m=+136.551833378" Nov 28 11:10:57 crc kubenswrapper[4923]: W1128 11:10:57.465152 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0755523c_a607_4b3e_966e_cb31294dde65.slice/crio-53c7af59663a21eadb811eea7719069d1865d11dbc35f15fa6886a9b0c9f65ba WatchSource:0}: Error finding container 53c7af59663a21eadb811eea7719069d1865d11dbc35f15fa6886a9b0c9f65ba: Status 404 returned error can't find the container with id 53c7af59663a21eadb811eea7719069d1865d11dbc35f15fa6886a9b0c9f65ba Nov 28 11:10:57 crc kubenswrapper[4923]: I1128 11:10:57.514815 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 11:10:57 crc kubenswrapper[4923]: E1128 11:10:57.516402 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2025-11-28 11:10:58.016377464 +0000 UTC m=+137.145061664 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 11:10:57 crc kubenswrapper[4923]: I1128 11:10:57.528204 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-g855d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " pod="openshift-image-registry/image-registry-697d97f7c8-g855d" Nov 28 11:10:57 crc kubenswrapper[4923]: E1128 11:10:57.528608 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 11:10:58.02859529 +0000 UTC m=+137.157279500 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-g855d" (UID: "6f176857-50d2-41c7-8237-961e330c629d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 11:10:57 crc kubenswrapper[4923]: I1128 11:10:57.551415 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-w99dl" podStartSLOduration=117.551399485 podStartE2EDuration="1m57.551399485s" podCreationTimestamp="2025-11-28 11:09:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:10:57.49606593 +0000 UTC m=+136.624750140" watchObservedRunningTime="2025-11-28 11:10:57.551399485 +0000 UTC m=+136.680083695" Nov 28 11:10:57 crc kubenswrapper[4923]: I1128 11:10:57.598867 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405460-778cj"] Nov 28 11:10:57 crc kubenswrapper[4923]: I1128 11:10:57.632405 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 11:10:57 crc kubenswrapper[4923]: E1128 11:10:57.632765 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 11:10:58.132751265 +0000 UTC m=+137.261435475 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 11:10:57 crc kubenswrapper[4923]: I1128 11:10:57.633153 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-jhjvz"] Nov 28 11:10:57 crc kubenswrapper[4923]: I1128 11:10:57.675221 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-f4dfh"] Nov 28 11:10:57 crc kubenswrapper[4923]: I1128 11:10:57.703771 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hfvn9" podStartSLOduration=117.703755572 podStartE2EDuration="1m57.703755572s" podCreationTimestamp="2025-11-28 11:09:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:10:57.703125525 +0000 UTC m=+136.831809735" watchObservedRunningTime="2025-11-28 11:10:57.703755572 +0000 UTC m=+136.832439782" Nov 28 11:10:57 crc kubenswrapper[4923]: I1128 11:10:57.734750 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-g855d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " pod="openshift-image-registry/image-registry-697d97f7c8-g855d" Nov 28 11:10:57 crc kubenswrapper[4923]: E1128 11:10:57.735107 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 11:10:58.235093208 +0000 UTC m=+137.363777418 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-g855d" (UID: "6f176857-50d2-41c7-8237-961e330c629d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 11:10:57 crc kubenswrapper[4923]: I1128 11:10:57.788965 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-nmc8h" event={"ID":"f8ec4511-563c-43b0-ad0d-b7916aeee89a","Type":"ContainerStarted","Data":"ec23108ac293c440718a798d4a8fd0c15e19ac10b71cec0d43ed37158c9b539c"} Nov 28 11:10:57 crc kubenswrapper[4923]: I1128 11:10:57.790726 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405460-778cj" event={"ID":"ac7e8c4a-3957-4b3f-a6ce-968bb42f2a89","Type":"ContainerStarted","Data":"40e012068a17395f83088fa6ec7f47b618f03ac95cd16a344fdc38705c1c7093"} Nov 28 11:10:57 crc kubenswrapper[4923]: I1128 11:10:57.791652 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-6t9g2" event={"ID":"0755523c-a607-4b3e-966e-cb31294dde65","Type":"ContainerStarted","Data":"53c7af59663a21eadb811eea7719069d1865d11dbc35f15fa6886a9b0c9f65ba"} Nov 28 11:10:57 crc kubenswrapper[4923]: I1128 11:10:57.827989 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-ckhbm" event={"ID":"1b9cce3b-92fb-45f2-a81a-4a0a722ed13e","Type":"ContainerStarted","Data":"ab16d6aa368e5a389afeebee69e296776f50056697fe510963060296ae48f916"} Nov 28 11:10:57 crc kubenswrapper[4923]: I1128 11:10:57.835118 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 11:10:57 crc kubenswrapper[4923]: E1128 11:10:57.835258 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 11:10:58.335237069 +0000 UTC m=+137.463921279 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 11:10:57 crc kubenswrapper[4923]: I1128 11:10:57.835854 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-czm5s" event={"ID":"c8ae2931-245c-49b5-9844-432bc6ecf3cc","Type":"ContainerStarted","Data":"e69d9352a9579e3726ede3ca452e863dbff21e346c680df055e8d320e479ef8f"} Nov 28 11:10:57 crc kubenswrapper[4923]: I1128 11:10:57.842586 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-g855d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " pod="openshift-image-registry/image-registry-697d97f7c8-g855d" Nov 28 11:10:57 crc kubenswrapper[4923]: E1128 11:10:57.842918 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 11:10:58.342908486 +0000 UTC m=+137.471592686 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-g855d" (UID: "6f176857-50d2-41c7-8237-961e330c629d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 11:10:57 crc kubenswrapper[4923]: I1128 11:10:57.856230 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-dfffg" event={"ID":"a49569a7-dda0-4856-816e-296642ddbdff","Type":"ContainerStarted","Data":"1b25fecaa17cf95a3aa5cd32bf915f5bc34511595db1cc3d26560a5e2e666c98"} Nov 28 11:10:57 crc kubenswrapper[4923]: I1128 11:10:57.862762 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-8cknx" event={"ID":"933d252a-8d35-415c-9e4e-754dd933be46","Type":"ContainerStarted","Data":"78cac4970d811a6a98e811b29cc2a2851f26e5e3a163898bfc35981c849d96ac"} Nov 28 11:10:57 crc kubenswrapper[4923]: I1128 11:10:57.949307 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 11:10:57 crc kubenswrapper[4923]: E1128 11:10:57.949599 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 11:10:58.449564242 +0000 UTC m=+137.578248452 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 11:10:57 crc kubenswrapper[4923]: I1128 11:10:57.950076 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-g855d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " pod="openshift-image-registry/image-registry-697d97f7c8-g855d" Nov 28 11:10:57 crc kubenswrapper[4923]: E1128 11:10:57.950342 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 11:10:58.450335254 +0000 UTC m=+137.579019464 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-g855d" (UID: "6f176857-50d2-41c7-8237-961e330c629d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 11:10:57 crc kubenswrapper[4923]: I1128 11:10:57.985520 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6vdll" podStartSLOduration=117.985505638 podStartE2EDuration="1m57.985505638s" podCreationTimestamp="2025-11-28 11:09:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:10:57.982209945 +0000 UTC m=+137.110894155" watchObservedRunningTime="2025-11-28 11:10:57.985505638 +0000 UTC m=+137.114189848" Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.043600 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-l4xf8"] Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.044629 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-l4xf8" Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.051113 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.051460 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 28 11:10:58 crc kubenswrapper[4923]: E1128 11:10:58.051478 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2025-11-28 11:10:58.551463673 +0000 UTC m=+137.680147883 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.056970 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-l4xf8"] Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.082546 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-m6kq9"] Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.109462 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-lhbv8"] Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.124083 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-m4snv" podStartSLOduration=118.124065196 podStartE2EDuration="1m58.124065196s" podCreationTimestamp="2025-11-28 11:09:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:10:58.124041605 +0000 UTC m=+137.252725815" watchObservedRunningTime="2025-11-28 11:10:58.124065196 +0000 UTC m=+137.252749406" Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.156054 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/84ce6a6f-d3d1-4ef9-8ca5-79dfa714a2b4-catalog-content\") pod \"certified-operators-l4xf8\" (UID: \"84ce6a6f-d3d1-4ef9-8ca5-79dfa714a2b4\") " pod="openshift-marketplace/certified-operators-l4xf8" Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.156106 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k2bjs\" (UniqueName: \"kubernetes.io/projected/84ce6a6f-d3d1-4ef9-8ca5-79dfa714a2b4-kube-api-access-k2bjs\") pod \"certified-operators-l4xf8\" (UID: \"84ce6a6f-d3d1-4ef9-8ca5-79dfa714a2b4\") " pod="openshift-marketplace/certified-operators-l4xf8" Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.156152 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-g855d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " pod="openshift-image-registry/image-registry-697d97f7c8-g855d" Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.156186 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/84ce6a6f-d3d1-4ef9-8ca5-79dfa714a2b4-utilities\") pod \"certified-operators-l4xf8\" (UID: \"84ce6a6f-d3d1-4ef9-8ca5-79dfa714a2b4\") " pod="openshift-marketplace/certified-operators-l4xf8" Nov 28 11:10:58 crc kubenswrapper[4923]: E1128 11:10:58.156423 4923 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 11:10:58.656408421 +0000 UTC m=+137.785092631 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-g855d" (UID: "6f176857-50d2-41c7-8237-961e330c629d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.205375 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-tz5lm" Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.230968 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-46sx6"] Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.232080 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-46sx6" Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.232248 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pbbcs" Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.232863 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pbbcs" Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.247674 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.252494 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-46sx6"] Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.258104 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.258298 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/84ce6a6f-d3d1-4ef9-8ca5-79dfa714a2b4-catalog-content\") pod \"certified-operators-l4xf8\" (UID: \"84ce6a6f-d3d1-4ef9-8ca5-79dfa714a2b4\") " pod="openshift-marketplace/certified-operators-l4xf8" Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.258323 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cffefe1d-9522-408d-aadf-c688411908e1-catalog-content\") pod \"community-operators-46sx6\" (UID: \"cffefe1d-9522-408d-aadf-c688411908e1\") " pod="openshift-marketplace/community-operators-46sx6" Nov 28 11:10:58 crc kubenswrapper[4923]: E1128 11:10:58.258340 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2025-11-28 11:10:58.758321902 +0000 UTC m=+137.887006102 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.258419 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k2bjs\" (UniqueName: \"kubernetes.io/projected/84ce6a6f-d3d1-4ef9-8ca5-79dfa714a2b4-kube-api-access-k2bjs\") pod \"certified-operators-l4xf8\" (UID: \"84ce6a6f-d3d1-4ef9-8ca5-79dfa714a2b4\") " pod="openshift-marketplace/certified-operators-l4xf8" Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.258461 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cffefe1d-9522-408d-aadf-c688411908e1-utilities\") pod \"community-operators-46sx6\" (UID: \"cffefe1d-9522-408d-aadf-c688411908e1\") " pod="openshift-marketplace/community-operators-46sx6" Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.258510 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zlhhb\" (UniqueName: \"kubernetes.io/projected/cffefe1d-9522-408d-aadf-c688411908e1-kube-api-access-zlhhb\") pod \"community-operators-46sx6\" (UID: \"cffefe1d-9522-408d-aadf-c688411908e1\") " pod="openshift-marketplace/community-operators-46sx6" Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.258553 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-g855d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " pod="openshift-image-registry/image-registry-697d97f7c8-g855d" Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.258610 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/84ce6a6f-d3d1-4ef9-8ca5-79dfa714a2b4-utilities\") pod \"certified-operators-l4xf8\" (UID: \"84ce6a6f-d3d1-4ef9-8ca5-79dfa714a2b4\") " pod="openshift-marketplace/certified-operators-l4xf8" Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.258881 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/84ce6a6f-d3d1-4ef9-8ca5-79dfa714a2b4-catalog-content\") pod \"certified-operators-l4xf8\" (UID: \"84ce6a6f-d3d1-4ef9-8ca5-79dfa714a2b4\") " pod="openshift-marketplace/certified-operators-l4xf8" Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.259025 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/84ce6a6f-d3d1-4ef9-8ca5-79dfa714a2b4-utilities\") pod \"certified-operators-l4xf8\" (UID: \"84ce6a6f-d3d1-4ef9-8ca5-79dfa714a2b4\") " pod="openshift-marketplace/certified-operators-l4xf8" Nov 28 11:10:58 crc kubenswrapper[4923]: E1128 11:10:58.260509 4923 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 11:10:58.760488783 +0000 UTC m=+137.889172993 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-g855d" (UID: "6f176857-50d2-41c7-8237-961e330c629d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.307755 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k2bjs\" (UniqueName: \"kubernetes.io/projected/84ce6a6f-d3d1-4ef9-8ca5-79dfa714a2b4-kube-api-access-k2bjs\") pod \"certified-operators-l4xf8\" (UID: \"84ce6a6f-d3d1-4ef9-8ca5-79dfa714a2b4\") " pod="openshift-marketplace/certified-operators-l4xf8" Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.341945 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pbbcs" Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.360066 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.360303 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cffefe1d-9522-408d-aadf-c688411908e1-catalog-content\") pod \"community-operators-46sx6\" (UID: \"cffefe1d-9522-408d-aadf-c688411908e1\") " pod="openshift-marketplace/community-operators-46sx6" Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.360385 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cffefe1d-9522-408d-aadf-c688411908e1-utilities\") pod \"community-operators-46sx6\" (UID: \"cffefe1d-9522-408d-aadf-c688411908e1\") " pod="openshift-marketplace/community-operators-46sx6" Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.360413 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zlhhb\" (UniqueName: \"kubernetes.io/projected/cffefe1d-9522-408d-aadf-c688411908e1-kube-api-access-zlhhb\") pod \"community-operators-46sx6\" (UID: \"cffefe1d-9522-408d-aadf-c688411908e1\") " pod="openshift-marketplace/community-operators-46sx6" Nov 28 11:10:58 crc kubenswrapper[4923]: E1128 11:10:58.361486 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 11:10:58.861470829 +0000 UTC m=+137.990155039 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.361781 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cffefe1d-9522-408d-aadf-c688411908e1-catalog-content\") pod \"community-operators-46sx6\" (UID: \"cffefe1d-9522-408d-aadf-c688411908e1\") " pod="openshift-marketplace/community-operators-46sx6" Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.362435 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cffefe1d-9522-408d-aadf-c688411908e1-utilities\") pod \"community-operators-46sx6\" (UID: \"cffefe1d-9522-408d-aadf-c688411908e1\") " pod="openshift-marketplace/community-operators-46sx6" Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.399636 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zlhhb\" (UniqueName: \"kubernetes.io/projected/cffefe1d-9522-408d-aadf-c688411908e1-kube-api-access-zlhhb\") pod \"community-operators-46sx6\" (UID: \"cffefe1d-9522-408d-aadf-c688411908e1\") " pod="openshift-marketplace/community-operators-46sx6" Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.426005 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pbbcs" podStartSLOduration=118.425989273 podStartE2EDuration="1m58.425989273s" podCreationTimestamp="2025-11-28 11:09:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:10:58.425947732 +0000 UTC m=+137.554631942" watchObservedRunningTime="2025-11-28 11:10:58.425989273 +0000 UTC m=+137.554673483" Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.433873 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-js2pf"] Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.434967 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-js2pf" Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.461597 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7f9kp\" (UniqueName: \"kubernetes.io/projected/f54a8053-81cc-429f-b68e-87a3fd245263-kube-api-access-7f9kp\") pod \"certified-operators-js2pf\" (UID: \"f54a8053-81cc-429f-b68e-87a3fd245263\") " pod="openshift-marketplace/certified-operators-js2pf" Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.461650 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f54a8053-81cc-429f-b68e-87a3fd245263-catalog-content\") pod \"certified-operators-js2pf\" (UID: \"f54a8053-81cc-429f-b68e-87a3fd245263\") " pod="openshift-marketplace/certified-operators-js2pf" Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.461673 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-g855d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " pod="openshift-image-registry/image-registry-697d97f7c8-g855d" Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.461697 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f54a8053-81cc-429f-b68e-87a3fd245263-utilities\") pod \"certified-operators-js2pf\" (UID: \"f54a8053-81cc-429f-b68e-87a3fd245263\") " pod="openshift-marketplace/certified-operators-js2pf" Nov 28 11:10:58 crc kubenswrapper[4923]: E1128 11:10:58.462565 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 11:10:58.962554307 +0000 UTC m=+138.091238517 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-g855d" (UID: "6f176857-50d2-41c7-8237-961e330c629d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.485254 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-w6d2b" podStartSLOduration=119.485234878 podStartE2EDuration="1m59.485234878s" podCreationTimestamp="2025-11-28 11:08:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:10:58.451916866 +0000 UTC m=+137.580601076" watchObservedRunningTime="2025-11-28 11:10:58.485234878 +0000 UTC m=+137.613919088" Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.485717 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-js2pf"] Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.496186 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-dfffg" Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.499716 4923 patch_prober.go:28] interesting pod/router-default-5444994796-dfffg container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 28 11:10:58 crc kubenswrapper[4923]: [-]has-synced failed: reason withheld Nov 28 11:10:58 crc kubenswrapper[4923]: [+]process-running ok Nov 28 11:10:58 crc kubenswrapper[4923]: healthz check failed Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.499768 4923 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-dfffg" podUID="a49569a7-dda0-4856-816e-296642ddbdff" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.536388 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-cnw7p"] Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.549645 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-mqbzf"] Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.572688 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-l7sqm"] Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.573483 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.573881 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7f9kp\" (UniqueName: \"kubernetes.io/projected/f54a8053-81cc-429f-b68e-87a3fd245263-kube-api-access-7f9kp\") pod 
\"certified-operators-js2pf\" (UID: \"f54a8053-81cc-429f-b68e-87a3fd245263\") " pod="openshift-marketplace/certified-operators-js2pf" Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.574620 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f54a8053-81cc-429f-b68e-87a3fd245263-catalog-content\") pod \"certified-operators-js2pf\" (UID: \"f54a8053-81cc-429f-b68e-87a3fd245263\") " pod="openshift-marketplace/certified-operators-js2pf" Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.574754 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f54a8053-81cc-429f-b68e-87a3fd245263-utilities\") pod \"certified-operators-js2pf\" (UID: \"f54a8053-81cc-429f-b68e-87a3fd245263\") " pod="openshift-marketplace/certified-operators-js2pf" Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.575207 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f54a8053-81cc-429f-b68e-87a3fd245263-utilities\") pod \"certified-operators-js2pf\" (UID: \"f54a8053-81cc-429f-b68e-87a3fd245263\") " pod="openshift-marketplace/certified-operators-js2pf" Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.575578 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f54a8053-81cc-429f-b68e-87a3fd245263-catalog-content\") pod \"certified-operators-js2pf\" (UID: \"f54a8053-81cc-429f-b68e-87a3fd245263\") " pod="openshift-marketplace/certified-operators-js2pf" Nov 28 11:10:58 crc kubenswrapper[4923]: E1128 11:10:58.586853 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 11:10:59.086823291 +0000 UTC m=+138.215507501 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.603751 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-l4xf8" Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.615555 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-dxdgg"] Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.637647 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7f9kp\" (UniqueName: \"kubernetes.io/projected/f54a8053-81cc-429f-b68e-87a3fd245263-kube-api-access-7f9kp\") pod \"certified-operators-js2pf\" (UID: \"f54a8053-81cc-429f-b68e-87a3fd245263\") " pod="openshift-marketplace/certified-operators-js2pf" Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.649266 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-tdzqr"] Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.650460 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-tdzqr" Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.654219 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-tdzqr"] Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.654645 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-46sx6" Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.656047 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-s5wfn"] Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.677428 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-g855d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " pod="openshift-image-registry/image-registry-697d97f7c8-g855d" Nov 28 11:10:58 crc kubenswrapper[4923]: E1128 11:10:58.677751 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 11:10:59.177740421 +0000 UTC m=+138.306424631 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-g855d" (UID: "6f176857-50d2-41c7-8237-961e330c629d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.778667 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.779199 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vrb2k\" (UniqueName: \"kubernetes.io/projected/e28cae41-ead3-4395-a457-3077c92068ca-kube-api-access-vrb2k\") pod \"community-operators-tdzqr\" (UID: \"e28cae41-ead3-4395-a457-3077c92068ca\") " pod="openshift-marketplace/community-operators-tdzqr" Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.779233 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e28cae41-ead3-4395-a457-3077c92068ca-utilities\") pod \"community-operators-tdzqr\" (UID: \"e28cae41-ead3-4395-a457-3077c92068ca\") " pod="openshift-marketplace/community-operators-tdzqr" Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.779256 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e28cae41-ead3-4395-a457-3077c92068ca-catalog-content\") pod \"community-operators-tdzqr\" (UID: \"e28cae41-ead3-4395-a457-3077c92068ca\") " pod="openshift-marketplace/community-operators-tdzqr" Nov 28 11:10:58 crc kubenswrapper[4923]: E1128 11:10:58.780456 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 11:10:59.280439795 +0000 UTC m=+138.409123995 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.797573 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-dfffg" podStartSLOduration=118.797551149 podStartE2EDuration="1m58.797551149s" podCreationTimestamp="2025-11-28 11:09:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:10:58.767554941 +0000 UTC m=+137.896239151" watchObservedRunningTime="2025-11-28 11:10:58.797551149 +0000 UTC m=+137.926235369" Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.803146 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-js2pf" Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.881269 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-g855d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " pod="openshift-image-registry/image-registry-697d97f7c8-g855d" Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.881333 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vrb2k\" (UniqueName: \"kubernetes.io/projected/e28cae41-ead3-4395-a457-3077c92068ca-kube-api-access-vrb2k\") pod \"community-operators-tdzqr\" (UID: \"e28cae41-ead3-4395-a457-3077c92068ca\") " pod="openshift-marketplace/community-operators-tdzqr" Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.881366 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e28cae41-ead3-4395-a457-3077c92068ca-utilities\") pod \"community-operators-tdzqr\" (UID: \"e28cae41-ead3-4395-a457-3077c92068ca\") " pod="openshift-marketplace/community-operators-tdzqr" Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.881388 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e28cae41-ead3-4395-a457-3077c92068ca-catalog-content\") pod \"community-operators-tdzqr\" (UID: \"e28cae41-ead3-4395-a457-3077c92068ca\") " pod="openshift-marketplace/community-operators-tdzqr" Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.881772 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e28cae41-ead3-4395-a457-3077c92068ca-catalog-content\") pod \"community-operators-tdzqr\" (UID: \"e28cae41-ead3-4395-a457-3077c92068ca\") " pod="openshift-marketplace/community-operators-tdzqr" Nov 28 11:10:58 crc kubenswrapper[4923]: E1128 11:10:58.882007 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2025-11-28 11:10:59.381996117 +0000 UTC m=+138.510680317 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-g855d" (UID: "6f176857-50d2-41c7-8237-961e330c629d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.885508 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e28cae41-ead3-4395-a457-3077c92068ca-utilities\") pod \"community-operators-tdzqr\" (UID: \"e28cae41-ead3-4395-a457-3077c92068ca\") " pod="openshift-marketplace/community-operators-tdzqr" Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.910452 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-lhbv8" event={"ID":"37ddbfb0-c042-460d-b772-9cdd214a79a1","Type":"ContainerStarted","Data":"32b3fd1004ddebeac92f3fbf1fd74a9c1a4a5ff6dbbf5dbd728a21b3763c45dc"} Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.912416 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vrb2k\" (UniqueName: \"kubernetes.io/projected/e28cae41-ead3-4395-a457-3077c92068ca-kube-api-access-vrb2k\") pod \"community-operators-tdzqr\" (UID: \"e28cae41-ead3-4395-a457-3077c92068ca\") " pod="openshift-marketplace/community-operators-tdzqr" Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.924317 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-6t9g2" event={"ID":"0755523c-a607-4b3e-966e-cb31294dde65","Type":"ContainerStarted","Data":"13ed67f5140980fb91ca5164b1a525632971056d5c32262ed0da4bf60b683c6d"} Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.930327 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-dxdgg" event={"ID":"14ef552e-14c1-49e6-b06d-0736e2a3ed73","Type":"ContainerStarted","Data":"b8022c59d6ebe16e02d699259c7227403f1b394effdf7a8b64500809dcf9e1da"} Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.932588 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-lswhk"] Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.947021 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-czm5s" event={"ID":"c8ae2931-245c-49b5-9844-432bc6ecf3cc","Type":"ContainerStarted","Data":"563f0e13a3ad227b0990be9ac5dfa3511a148c14416e6873b76f0bc7198e30f6"} Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.947905 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-6t9g2" podStartSLOduration=118.94788794 podStartE2EDuration="1m58.94788794s" podCreationTimestamp="2025-11-28 11:09:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:10:58.947869639 +0000 UTC m=+138.076553849" watchObservedRunningTime="2025-11-28 11:10:58.94788794 +0000 UTC m=+138.076572150" Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 
11:10:58.957282 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-m4cv2"] Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.964982 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-j6dnf"] Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.982165 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 11:10:58 crc kubenswrapper[4923]: E1128 11:10:58.982331 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 11:10:59.482305753 +0000 UTC m=+138.610989963 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.982537 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-g855d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " pod="openshift-image-registry/image-registry-697d97f7c8-g855d" Nov 28 11:10:58 crc kubenswrapper[4923]: E1128 11:10:58.983239 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 11:10:59.483224359 +0000 UTC m=+138.611908569 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-g855d" (UID: "6f176857-50d2-41c7-8237-961e330c629d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 11:10:58 crc kubenswrapper[4923]: I1128 11:10:58.994184 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-tdzqr" Nov 28 11:10:59 crc kubenswrapper[4923]: I1128 11:10:59.084482 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 11:10:59 crc kubenswrapper[4923]: E1128 11:10:59.084742 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 11:10:59.584720889 +0000 UTC m=+138.713405099 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 11:10:59 crc kubenswrapper[4923]: I1128 11:10:59.084994 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-g855d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " pod="openshift-image-registry/image-registry-697d97f7c8-g855d" Nov 28 11:10:59 crc kubenswrapper[4923]: E1128 11:10:59.085249 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 11:10:59.585242424 +0000 UTC m=+138.713926634 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-g855d" (UID: "6f176857-50d2-41c7-8237-961e330c629d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 11:10:59 crc kubenswrapper[4923]: I1128 11:10:59.186328 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 11:10:59 crc kubenswrapper[4923]: E1128 11:10:59.186556 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 11:10:59.686281681 +0000 UTC m=+138.814965901 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 11:10:59 crc kubenswrapper[4923]: I1128 11:10:59.186738 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-g855d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " pod="openshift-image-registry/image-registry-697d97f7c8-g855d" Nov 28 11:10:59 crc kubenswrapper[4923]: E1128 11:10:59.187510 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 11:10:59.687494575 +0000 UTC m=+138.816178795 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-g855d" (UID: "6f176857-50d2-41c7-8237-961e330c629d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 11:10:59 crc kubenswrapper[4923]: I1128 11:10:59.287326 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 11:10:59 crc kubenswrapper[4923]: E1128 11:10:59.287467 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 11:10:59.787438391 +0000 UTC m=+138.916122601 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 11:10:59 crc kubenswrapper[4923]: I1128 11:10:59.287658 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-g855d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " pod="openshift-image-registry/image-registry-697d97f7c8-g855d" Nov 28 11:10:59 crc kubenswrapper[4923]: E1128 11:10:59.287945 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 11:10:59.787920294 +0000 UTC m=+138.916604504 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-g855d" (UID: "6f176857-50d2-41c7-8237-961e330c629d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 11:10:59 crc kubenswrapper[4923]: I1128 11:10:59.381700 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-8cknx" event={"ID":"933d252a-8d35-415c-9e4e-754dd933be46","Type":"ContainerStarted","Data":"cfcf96f9198fe761a9377518bf3020cc54ea9789f8fe4a8670d66d9c4a342807"} Nov 28 11:10:59 crc kubenswrapper[4923]: I1128 11:10:59.381751 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-8cknx" event={"ID":"933d252a-8d35-415c-9e4e-754dd933be46","Type":"ContainerStarted","Data":"97f661e1d20107108dd105df94cf8ca256ba0e94e5175cfb9d7e25d04c9d5dea"} Nov 28 11:10:59 crc kubenswrapper[4923]: I1128 11:10:59.381771 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-m6kq9" event={"ID":"4b125207-13c8-4142-86d1-99645442eddf","Type":"ContainerStarted","Data":"069cc3e07fde8849af02535cb2688960d3915142fb4f0a61bc204d81d8ba59d0"} Nov 28 11:10:59 crc kubenswrapper[4923]: I1128 11:10:59.381793 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-s5wfn" event={"ID":"03fb8831-5c58-4bcf-8e9c-d18d0074be3e","Type":"ContainerStarted","Data":"029bfca6741521b4e9e380018380c650878b32eb98a42235a6f65c734f533ece"} Nov 28 11:10:59 crc kubenswrapper[4923]: I1128 11:10:59.381810 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-cnw7p" event={"ID":"3de372b7-d84f-46f7-b773-255e32d0e882","Type":"ContainerStarted","Data":"20605df87f85b148f387213e1c1e91db998a183f52bf72ec016344e93fd2d7d7"} Nov 28 11:10:59 crc kubenswrapper[4923]: I1128 11:10:59.391644 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume 
started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 11:10:59 crc kubenswrapper[4923]: E1128 11:10:59.392038 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 11:10:59.892009618 +0000 UTC m=+139.020693838 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 11:10:59 crc kubenswrapper[4923]: I1128 11:10:59.392252 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-g855d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " pod="openshift-image-registry/image-registry-697d97f7c8-g855d" Nov 28 11:10:59 crc kubenswrapper[4923]: E1128 11:10:59.392571 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 11:10:59.892558743 +0000 UTC m=+139.021242953 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-g855d" (UID: "6f176857-50d2-41c7-8237-961e330c629d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 11:10:59 crc kubenswrapper[4923]: I1128 11:10:59.409114 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-nmc8h" event={"ID":"f8ec4511-563c-43b0-ad0d-b7916aeee89a","Type":"ContainerStarted","Data":"440195bd533fe7b7cacdfc75b03681c0ab07709da10315d17a9616f395e6f796"} Nov 28 11:10:59 crc kubenswrapper[4923]: I1128 11:10:59.417265 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-f4dfh" event={"ID":"3a41216a-9d26-4691-aa6b-8a50c0a94016","Type":"ContainerStarted","Data":"0fbbaa62c5b8d092d33a0b206016f4ef63029fd8da00db3b2a672040c813b1a6"} Nov 28 11:10:59 crc kubenswrapper[4923]: I1128 11:10:59.417308 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-f4dfh" event={"ID":"3a41216a-9d26-4691-aa6b-8a50c0a94016","Type":"ContainerStarted","Data":"fd59915dadb19bb8f0078eb5b37ce1862d49084a4acbcc5125cb634521a916d9"} Nov 28 11:10:59 crc kubenswrapper[4923]: I1128 11:10:59.421512 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-l7sqm" event={"ID":"03781a39-3788-4e8a-9a0d-d97c3fb9e4b3","Type":"ContainerStarted","Data":"d6b7a93b07ce62d628d11fe461e50ab58d47821e382b8efcf39bd061a6c7370b"} Nov 28 11:10:59 crc kubenswrapper[4923]: I1128 11:10:59.423224 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405460-778cj" event={"ID":"ac7e8c4a-3957-4b3f-a6ce-968bb42f2a89","Type":"ContainerStarted","Data":"73a41071dd19a6a0e8a0f6f1a9488c6407d59ab295ef40752949d8cabd9b61ba"} Nov 28 11:10:59 crc kubenswrapper[4923]: I1128 11:10:59.453170 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-ckhbm" event={"ID":"1b9cce3b-92fb-45f2-a81a-4a0a722ed13e","Type":"ContainerStarted","Data":"92553a23ad1e98e31a76899c1ecdf0b20ca6ef88d6711d1c3616caf751e859cf"} Nov 28 11:10:59 crc kubenswrapper[4923]: I1128 11:10:59.465829 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-dfffg" event={"ID":"a49569a7-dda0-4856-816e-296642ddbdff","Type":"ContainerStarted","Data":"db6b6b14b441a89a7490e9e254ad5d5e863759bad5a6a1d30093449042b92ca3"} Nov 28 11:10:59 crc kubenswrapper[4923]: I1128 11:10:59.473314 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-jhjvz" event={"ID":"ed45dc51-25a9-47f1-b80b-de4288627e50","Type":"ContainerStarted","Data":"7be6402c59a21d62f302a6349397fb008ad3c438d69d16c0de14dbb46b59ce3d"} Nov 28 11:10:59 crc kubenswrapper[4923]: I1128 11:10:59.473363 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-jhjvz" 
event={"ID":"ed45dc51-25a9-47f1-b80b-de4288627e50","Type":"ContainerStarted","Data":"611beb031b57e8587e8fdc2c7c1d56b8b073709bc76496c2e9448dac9921c065"} Nov 28 11:10:59 crc kubenswrapper[4923]: I1128 11:10:59.484495 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-czm5s" podStartSLOduration=7.484480612 podStartE2EDuration="7.484480612s" podCreationTimestamp="2025-11-28 11:10:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:10:58.980231584 +0000 UTC m=+138.108915794" watchObservedRunningTime="2025-11-28 11:10:59.484480612 +0000 UTC m=+138.613164822" Nov 28 11:10:59 crc kubenswrapper[4923]: I1128 11:10:59.494049 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-mqbzf" event={"ID":"e7f08880-f8db-4170-8d1d-1bccb2df10f4","Type":"ContainerStarted","Data":"9b2b9a5e1e339ab847518943a95c8c20679a18b06c31b152a5aa7de4b20be49b"} Nov 28 11:10:59 crc kubenswrapper[4923]: I1128 11:10:59.497193 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-8b2zb"] Nov 28 11:10:59 crc kubenswrapper[4923]: I1128 11:10:59.497694 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 11:10:59 crc kubenswrapper[4923]: E1128 11:10:59.499140 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 11:10:59.999119816 +0000 UTC m=+139.127804026 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 11:10:59 crc kubenswrapper[4923]: I1128 11:10:59.500460 4923 patch_prober.go:28] interesting pod/router-default-5444994796-dfffg container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 28 11:10:59 crc kubenswrapper[4923]: [-]has-synced failed: reason withheld Nov 28 11:10:59 crc kubenswrapper[4923]: [+]process-running ok Nov 28 11:10:59 crc kubenswrapper[4923]: healthz check failed Nov 28 11:10:59 crc kubenswrapper[4923]: I1128 11:10:59.500495 4923 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-dfffg" podUID="a49569a7-dda0-4856-816e-296642ddbdff" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 11:10:59 crc kubenswrapper[4923]: I1128 11:10:59.528162 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-hcztj"] Nov 28 11:10:59 crc kubenswrapper[4923]: I1128 11:10:59.544954 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pbbcs" Nov 28 11:10:59 crc kubenswrapper[4923]: I1128 11:10:59.548345 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-46sx6"] Nov 28 11:10:59 crc kubenswrapper[4923]: I1128 11:10:59.559090 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-7j5p2"] Nov 28 11:10:59 crc kubenswrapper[4923]: I1128 11:10:59.562143 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-8cknx" podStartSLOduration=119.562119758 podStartE2EDuration="1m59.562119758s" podCreationTimestamp="2025-11-28 11:09:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:10:59.490723649 +0000 UTC m=+138.619407859" watchObservedRunningTime="2025-11-28 11:10:59.562119758 +0000 UTC m=+138.690803968" Nov 28 11:10:59 crc kubenswrapper[4923]: I1128 11:10:59.565101 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-cbtlt"] Nov 28 11:10:59 crc kubenswrapper[4923]: I1128 11:10:59.569573 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-l4xf8"] Nov 28 11:10:59 crc kubenswrapper[4923]: I1128 11:10:59.569613 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zfzrm"] Nov 28 11:10:59 crc kubenswrapper[4923]: I1128 11:10:59.593249 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-2vsdg"] Nov 28 11:10:59 crc kubenswrapper[4923]: I1128 11:10:59.593424 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openshift-console/downloads-7954f5f757-fd2jt"] Nov 28 11:10:59 crc kubenswrapper[4923]: W1128 11:10:59.593568 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcffefe1d_9522_408d_aadf_c688411908e1.slice/crio-0d68c9ad7077585d2ee26c0cf957b1273d3edb225d251056da7de07ec386b045 WatchSource:0}: Error finding container 0d68c9ad7077585d2ee26c0cf957b1273d3edb225d251056da7de07ec386b045: Status 404 returned error can't find the container with id 0d68c9ad7077585d2ee26c0cf957b1273d3edb225d251056da7de07ec386b045 Nov 28 11:10:59 crc kubenswrapper[4923]: I1128 11:10:59.598027 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-p7flx"] Nov 28 11:10:59 crc kubenswrapper[4923]: I1128 11:10:59.598402 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-44p4v"] Nov 28 11:10:59 crc kubenswrapper[4923]: I1128 11:10:59.600194 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-kwrnm"] Nov 28 11:10:59 crc kubenswrapper[4923]: I1128 11:10:59.600552 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-nmc8h" podStartSLOduration=119.600543384 podStartE2EDuration="1m59.600543384s" podCreationTimestamp="2025-11-28 11:09:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:10:59.520914832 +0000 UTC m=+138.649599042" watchObservedRunningTime="2025-11-28 11:10:59.600543384 +0000 UTC m=+138.729227594" Nov 28 11:10:59 crc kubenswrapper[4923]: I1128 11:10:59.601220 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-g855d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " pod="openshift-image-registry/image-registry-697d97f7c8-g855d" Nov 28 11:10:59 crc kubenswrapper[4923]: E1128 11:10:59.604198 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 11:11:00.104185737 +0000 UTC m=+139.232869947 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-g855d" (UID: "6f176857-50d2-41c7-8237-961e330c629d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 11:10:59 crc kubenswrapper[4923]: W1128 11:10:59.605059 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod31885971_a674_4f47_999b_d7e5435f34d0.slice/crio-ebe12b1e962a0155e0fc27f14a43907c250f11bae046d077ac341e273b78480d WatchSource:0}: Error finding container ebe12b1e962a0155e0fc27f14a43907c250f11bae046d077ac341e273b78480d: Status 404 returned error can't find the container with id ebe12b1e962a0155e0fc27f14a43907c250f11bae046d077ac341e273b78480d Nov 28 11:10:59 crc kubenswrapper[4923]: I1128 11:10:59.605122 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-9x5dg"] Nov 28 11:10:59 crc kubenswrapper[4923]: I1128 11:10:59.607552 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29405460-778cj" podStartSLOduration=120.607538922 podStartE2EDuration="2m0.607538922s" podCreationTimestamp="2025-11-28 11:08:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:10:59.569370053 +0000 UTC m=+138.698054263" watchObservedRunningTime="2025-11-28 11:10:59.607538922 +0000 UTC m=+138.736223132" Nov 28 11:10:59 crc kubenswrapper[4923]: I1128 11:10:59.613118 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-f4dfh" podStartSLOduration=119.613105079 podStartE2EDuration="1m59.613105079s" podCreationTimestamp="2025-11-28 11:09:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:10:59.588221646 +0000 UTC m=+138.716905856" watchObservedRunningTime="2025-11-28 11:10:59.613105079 +0000 UTC m=+138.741789279" Nov 28 11:10:59 crc kubenswrapper[4923]: I1128 11:10:59.607964 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-k9zcz"] Nov 28 11:10:59 crc kubenswrapper[4923]: W1128 11:10:59.615608 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9bf3f7ca_e9dc_4c5a_9d6d_7622e7c545ba.slice/crio-1bdd3c20ed9f54aeea4bc0a3285fdc243ac71445f8538d3d27f16e154a79b248 WatchSource:0}: Error finding container 1bdd3c20ed9f54aeea4bc0a3285fdc243ac71445f8538d3d27f16e154a79b248: Status 404 returned error can't find the container with id 1bdd3c20ed9f54aeea4bc0a3285fdc243ac71445f8538d3d27f16e154a79b248 Nov 28 11:10:59 crc kubenswrapper[4923]: W1128 11:10:59.618699 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcdedfd6e_9082_4411_b128_fc9806c67bd3.slice/crio-cc1eed32d6f09d5be859793239543114fee6e6010605998b1a7da3464b0f47e6 WatchSource:0}: Error finding container cc1eed32d6f09d5be859793239543114fee6e6010605998b1a7da3464b0f47e6: Status 404 returned error can't find the container 
with id cc1eed32d6f09d5be859793239543114fee6e6010605998b1a7da3464b0f47e6 Nov 28 11:10:59 crc kubenswrapper[4923]: I1128 11:10:59.652556 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-jhjvz" podStartSLOduration=119.652531684 podStartE2EDuration="1m59.652531684s" podCreationTimestamp="2025-11-28 11:09:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:10:59.641164743 +0000 UTC m=+138.769848953" watchObservedRunningTime="2025-11-28 11:10:59.652531684 +0000 UTC m=+138.781215914" Nov 28 11:10:59 crc kubenswrapper[4923]: I1128 11:10:59.702709 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 11:10:59 crc kubenswrapper[4923]: E1128 11:10:59.703110 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 11:11:00.203096084 +0000 UTC m=+139.331780294 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 11:10:59 crc kubenswrapper[4923]: W1128 11:10:59.735060 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod260bf9ee_f05f_4598_9bf9_de7ed2d1723f.slice/crio-6ee2bfd7a4cdf0567fb256c00ebfb37e68283e65a2c4bb89802ad11e9e4dc873 WatchSource:0}: Error finding container 6ee2bfd7a4cdf0567fb256c00ebfb37e68283e65a2c4bb89802ad11e9e4dc873: Status 404 returned error can't find the container with id 6ee2bfd7a4cdf0567fb256c00ebfb37e68283e65a2c4bb89802ad11e9e4dc873 Nov 28 11:10:59 crc kubenswrapper[4923]: I1128 11:10:59.804850 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-g855d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " pod="openshift-image-registry/image-registry-697d97f7c8-g855d" Nov 28 11:10:59 crc kubenswrapper[4923]: E1128 11:10:59.805305 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 11:11:00.305294053 +0000 UTC m=+139.433978263 (durationBeforeRetry 500ms). 
Nov 28 11:10:59 crc kubenswrapper[4923]: W1128 11:10:59.807040 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podce29e4b8_83fb_402d_a969_efa9106fdf29.slice/crio-b95f685503e2953e6b0b798860435a947916c319954a5808cbbd668b71374bd7 WatchSource:0}: Error finding container b95f685503e2953e6b0b798860435a947916c319954a5808cbbd668b71374bd7: Status 404 returned error can't find the container with id b95f685503e2953e6b0b798860435a947916c319954a5808cbbd668b71374bd7
Nov 28 11:10:59 crc kubenswrapper[4923]: I1128 11:10:59.910303 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 11:10:59 crc kubenswrapper[4923]: E1128 11:10:59.910519 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 11:11:00.410497368 +0000 UTC m=+139.539181578 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 11:10:59 crc kubenswrapper[4923]: I1128 11:10:59.910782 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-g855d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " pod="openshift-image-registry/image-registry-697d97f7c8-g855d"
Nov 28 11:10:59 crc kubenswrapper[4923]: E1128 11:10:59.911087 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 11:11:00.411080394 +0000 UTC m=+139.539764604 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-g855d" (UID: "6f176857-50d2-41c7-8237-961e330c629d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 11:10:59 crc kubenswrapper[4923]: I1128 11:10:59.939402 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-tdzqr"]
Nov 28 11:11:00 crc kubenswrapper[4923]: I1128 11:11:00.011845 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 11:11:00 crc kubenswrapper[4923]: E1128 11:11:00.012139 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 11:11:00.512115631 +0000 UTC m=+139.640799841 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 11:11:00 crc kubenswrapper[4923]: I1128 11:11:00.012396 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-g855d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " pod="openshift-image-registry/image-registry-697d97f7c8-g855d"
Nov 28 11:11:00 crc kubenswrapper[4923]: E1128 11:11:00.012644 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 11:11:00.512634906 +0000 UTC m=+139.641319116 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-g855d" (UID: "6f176857-50d2-41c7-8237-961e330c629d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 11:11:00 crc kubenswrapper[4923]: I1128 11:11:00.033191 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-n2qp8"]
Nov 28 11:11:00 crc kubenswrapper[4923]: I1128 11:11:00.034067 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-n2qp8"
Nov 28 11:11:00 crc kubenswrapper[4923]: I1128 11:11:00.037310 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb"
Nov 28 11:11:00 crc kubenswrapper[4923]: I1128 11:11:00.047593 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-n2qp8"]
Nov 28 11:11:00 crc kubenswrapper[4923]: I1128 11:11:00.049113 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-js2pf"]
Nov 28 11:11:00 crc kubenswrapper[4923]: I1128 11:11:00.113286 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 11:11:00 crc kubenswrapper[4923]: E1128 11:11:00.115278 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 11:11:00.615252958 +0000 UTC m=+139.743937168 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 11:11:00 crc kubenswrapper[4923]: I1128 11:11:00.125757 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a6908a38-5adb-40c3-85e8-730eaa4937ef-catalog-content\") pod \"redhat-marketplace-n2qp8\" (UID: \"a6908a38-5adb-40c3-85e8-730eaa4937ef\") " pod="openshift-marketplace/redhat-marketplace-n2qp8"
Nov 28 11:11:00 crc kubenswrapper[4923]: I1128 11:11:00.125803 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9dd64\" (UniqueName: \"kubernetes.io/projected/a6908a38-5adb-40c3-85e8-730eaa4937ef-kube-api-access-9dd64\") pod \"redhat-marketplace-n2qp8\" (UID: \"a6908a38-5adb-40c3-85e8-730eaa4937ef\") " pod="openshift-marketplace/redhat-marketplace-n2qp8"
Nov 28 11:11:00 crc kubenswrapper[4923]: I1128 11:11:00.125852 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-g855d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " pod="openshift-image-registry/image-registry-697d97f7c8-g855d"
Nov 28 11:11:00 crc kubenswrapper[4923]: I1128 11:11:00.126000 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a6908a38-5adb-40c3-85e8-730eaa4937ef-utilities\") pod \"redhat-marketplace-n2qp8\" (UID: \"a6908a38-5adb-40c3-85e8-730eaa4937ef\") " pod="openshift-marketplace/redhat-marketplace-n2qp8"
Nov 28 11:11:00 crc kubenswrapper[4923]: E1128 11:11:00.126344 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 11:11:00.626331891 +0000 UTC m=+139.755016101 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-g855d" (UID: "6f176857-50d2-41c7-8237-961e330c629d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 11:11:00 crc kubenswrapper[4923]: I1128 11:11:00.226939 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 11:11:00 crc kubenswrapper[4923]: I1128 11:11:00.227129 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a6908a38-5adb-40c3-85e8-730eaa4937ef-utilities\") pod \"redhat-marketplace-n2qp8\" (UID: \"a6908a38-5adb-40c3-85e8-730eaa4937ef\") " pod="openshift-marketplace/redhat-marketplace-n2qp8"
Nov 28 11:11:00 crc kubenswrapper[4923]: I1128 11:11:00.227193 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a6908a38-5adb-40c3-85e8-730eaa4937ef-catalog-content\") pod \"redhat-marketplace-n2qp8\" (UID: \"a6908a38-5adb-40c3-85e8-730eaa4937ef\") " pod="openshift-marketplace/redhat-marketplace-n2qp8"
Nov 28 11:11:00 crc kubenswrapper[4923]: I1128 11:11:00.227212 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9dd64\" (UniqueName: \"kubernetes.io/projected/a6908a38-5adb-40c3-85e8-730eaa4937ef-kube-api-access-9dd64\") pod \"redhat-marketplace-n2qp8\" (UID: \"a6908a38-5adb-40c3-85e8-730eaa4937ef\") " pod="openshift-marketplace/redhat-marketplace-n2qp8"
Nov 28 11:11:00 crc kubenswrapper[4923]: E1128 11:11:00.227577 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 11:11:00.727562823 +0000 UTC m=+139.856247033 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 11:11:00 crc kubenswrapper[4923]: I1128 11:11:00.227888 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a6908a38-5adb-40c3-85e8-730eaa4937ef-utilities\") pod \"redhat-marketplace-n2qp8\" (UID: \"a6908a38-5adb-40c3-85e8-730eaa4937ef\") " pod="openshift-marketplace/redhat-marketplace-n2qp8"
Nov 28 11:11:00 crc kubenswrapper[4923]: I1128 11:11:00.228142 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a6908a38-5adb-40c3-85e8-730eaa4937ef-catalog-content\") pod \"redhat-marketplace-n2qp8\" (UID: \"a6908a38-5adb-40c3-85e8-730eaa4937ef\") " pod="openshift-marketplace/redhat-marketplace-n2qp8"
Nov 28 11:11:00 crc kubenswrapper[4923]: I1128 11:11:00.321921 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9dd64\" (UniqueName: \"kubernetes.io/projected/a6908a38-5adb-40c3-85e8-730eaa4937ef-kube-api-access-9dd64\") pod \"redhat-marketplace-n2qp8\" (UID: \"a6908a38-5adb-40c3-85e8-730eaa4937ef\") " pod="openshift-marketplace/redhat-marketplace-n2qp8"
Nov 28 11:11:00 crc kubenswrapper[4923]: I1128 11:11:00.328818 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-g855d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " pod="openshift-image-registry/image-registry-697d97f7c8-g855d"
Nov 28 11:11:00 crc kubenswrapper[4923]: E1128 11:11:00.329404 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 11:11:00.829387152 +0000 UTC m=+139.958071362 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-g855d" (UID: "6f176857-50d2-41c7-8237-961e330c629d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 11:11:00 crc kubenswrapper[4923]: I1128 11:11:00.353573 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-n2qp8"
Nov 28 11:11:00 crc kubenswrapper[4923]: I1128 11:11:00.429762 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 11:11:00 crc kubenswrapper[4923]: E1128 11:11:00.429913 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 11:11:00.929892044 +0000 UTC m=+140.058576254 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 11:11:00 crc kubenswrapper[4923]: I1128 11:11:00.430147 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-g855d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " pod="openshift-image-registry/image-registry-697d97f7c8-g855d"
Nov 28 11:11:00 crc kubenswrapper[4923]: E1128 11:11:00.430448 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 11:11:00.930436709 +0000 UTC m=+140.059120919 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-g855d" (UID: "6f176857-50d2-41c7-8237-961e330c629d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 11:11:00 crc kubenswrapper[4923]: I1128 11:11:00.451173 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-fv729"]
Nov 28 11:11:00 crc kubenswrapper[4923]: I1128 11:11:00.452132 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fv729"
Nov 28 11:11:00 crc kubenswrapper[4923]: I1128 11:11:00.463131 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-fv729"]
Nov 28 11:11:00 crc kubenswrapper[4923]: I1128 11:11:00.500965 4923 patch_prober.go:28] interesting pod/router-default-5444994796-dfffg container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 28 11:11:00 crc kubenswrapper[4923]: [-]has-synced failed: reason withheld
Nov 28 11:11:00 crc kubenswrapper[4923]: [+]process-running ok
Nov 28 11:11:00 crc kubenswrapper[4923]: healthz check failed
Nov 28 11:11:00 crc kubenswrapper[4923]: I1128 11:11:00.501010 4923 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-dfffg" podUID="a49569a7-dda0-4856-816e-296642ddbdff" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 28 11:11:00 crc kubenswrapper[4923]: I1128 11:11:00.530849 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 11:11:00 crc kubenswrapper[4923]: E1128 11:11:00.530941 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 11:11:01.03090824 +0000 UTC m=+140.159592450 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 11:11:00 crc kubenswrapper[4923]: I1128 11:11:00.531341 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2db4e8e9-8919-4b13-afef-835484cc865a-utilities\") pod \"redhat-marketplace-fv729\" (UID: \"2db4e8e9-8919-4b13-afef-835484cc865a\") " pod="openshift-marketplace/redhat-marketplace-fv729"
Nov 28 11:11:00 crc kubenswrapper[4923]: I1128 11:11:00.531389 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-g855d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " pod="openshift-image-registry/image-registry-697d97f7c8-g855d"
Nov 28 11:11:00 crc kubenswrapper[4923]: I1128 11:11:00.531431 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q99hs\" (UniqueName: \"kubernetes.io/projected/2db4e8e9-8919-4b13-afef-835484cc865a-kube-api-access-q99hs\") pod \"redhat-marketplace-fv729\" (UID: \"2db4e8e9-8919-4b13-afef-835484cc865a\") " pod="openshift-marketplace/redhat-marketplace-fv729"
Nov 28 11:11:00 crc kubenswrapper[4923]: I1128 11:11:00.531491 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2db4e8e9-8919-4b13-afef-835484cc865a-catalog-content\") pod \"redhat-marketplace-fv729\" (UID: \"2db4e8e9-8919-4b13-afef-835484cc865a\") " pod="openshift-marketplace/redhat-marketplace-fv729"
Nov 28 11:11:00 crc kubenswrapper[4923]: E1128 11:11:00.531828 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 11:11:01.031815986 +0000 UTC m=+140.160500196 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-g855d" (UID: "6f176857-50d2-41c7-8237-961e330c629d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 11:11:00 crc kubenswrapper[4923]: I1128 11:11:00.543048 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-j6dnf" event={"ID":"889057c8-1eb2-4829-b1d5-a906b88eb68c","Type":"ContainerStarted","Data":"8c6980786e4a27d6af947586dc5b831670b308923e7b91caac7194a503b1cf6c"}
Nov 28 11:11:00 crc kubenswrapper[4923]: I1128 11:11:00.563570 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-46sx6" event={"ID":"cffefe1d-9522-408d-aadf-c688411908e1","Type":"ContainerStarted","Data":"0d68c9ad7077585d2ee26c0cf957b1273d3edb225d251056da7de07ec386b045"}
Nov 28 11:11:00 crc kubenswrapper[4923]: I1128 11:11:00.611144 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-js2pf" event={"ID":"f54a8053-81cc-429f-b68e-87a3fd245263","Type":"ContainerStarted","Data":"4df7fcd080d49d8fb2e935538ac1bc88e0529729bd7f1ac2e5c6998abd42cb56"}
Nov 28 11:11:00 crc kubenswrapper[4923]: I1128 11:11:00.632009 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 11:11:00 crc kubenswrapper[4923]: I1128 11:11:00.632170 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2db4e8e9-8919-4b13-afef-835484cc865a-utilities\") pod \"redhat-marketplace-fv729\" (UID: \"2db4e8e9-8919-4b13-afef-835484cc865a\") " pod="openshift-marketplace/redhat-marketplace-fv729"
Nov 28 11:11:00 crc kubenswrapper[4923]: I1128 11:11:00.632222 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q99hs\" (UniqueName: \"kubernetes.io/projected/2db4e8e9-8919-4b13-afef-835484cc865a-kube-api-access-q99hs\") pod \"redhat-marketplace-fv729\" (UID: \"2db4e8e9-8919-4b13-afef-835484cc865a\") " pod="openshift-marketplace/redhat-marketplace-fv729"
Nov 28 11:11:00 crc kubenswrapper[4923]: I1128 11:11:00.632265 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2db4e8e9-8919-4b13-afef-835484cc865a-catalog-content\") pod \"redhat-marketplace-fv729\" (UID: \"2db4e8e9-8919-4b13-afef-835484cc865a\") " pod="openshift-marketplace/redhat-marketplace-fv729"
Nov 28 11:11:00 crc kubenswrapper[4923]: I1128 11:11:00.632648 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2db4e8e9-8919-4b13-afef-835484cc865a-catalog-content\") pod \"redhat-marketplace-fv729\" (UID: \"2db4e8e9-8919-4b13-afef-835484cc865a\") " pod="openshift-marketplace/redhat-marketplace-fv729"
Nov 28 11:11:00 crc kubenswrapper[4923]: E1128 11:11:00.632706 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 11:11:01.132693248 +0000 UTC m=+140.261377458 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 11:11:00 crc kubenswrapper[4923]: I1128 11:11:00.632898 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2db4e8e9-8919-4b13-afef-835484cc865a-utilities\") pod \"redhat-marketplace-fv729\" (UID: \"2db4e8e9-8919-4b13-afef-835484cc865a\") " pod="openshift-marketplace/redhat-marketplace-fv729"
Nov 28 11:11:00 crc kubenswrapper[4923]: I1128 11:11:00.645291 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-p7flx" event={"ID":"cdedfd6e-9082-4411-b128-fc9806c67bd3","Type":"ContainerStarted","Data":"cc1eed32d6f09d5be859793239543114fee6e6010605998b1a7da3464b0f47e6"}
Nov 28 11:11:00 crc kubenswrapper[4923]: I1128 11:11:00.651771 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tdzqr" event={"ID":"e28cae41-ead3-4395-a457-3077c92068ca","Type":"ContainerStarted","Data":"19526b26813de4500fec40b55a09e3db8044eb0cf662eae1b0efbe1f0d2f91bf"}
Nov 28 11:11:00 crc kubenswrapper[4923]: I1128 11:11:00.666971 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q99hs\" (UniqueName: \"kubernetes.io/projected/2db4e8e9-8919-4b13-afef-835484cc865a-kube-api-access-q99hs\") pod \"redhat-marketplace-fv729\" (UID: \"2db4e8e9-8919-4b13-afef-835484cc865a\") " pod="openshift-marketplace/redhat-marketplace-fv729"
Nov 28 11:11:00 crc kubenswrapper[4923]: I1128 11:11:00.669889 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-hcztj" event={"ID":"31885971-a674-4f47-999b-d7e5435f34d0","Type":"ContainerStarted","Data":"ebe12b1e962a0155e0fc27f14a43907c250f11bae046d077ac341e273b78480d"}
Nov 28 11:11:00 crc kubenswrapper[4923]: I1128 11:11:00.695086 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-44p4v" event={"ID":"9bf3f7ca-e9dc-4c5a-9d6d-7622e7c545ba","Type":"ContainerStarted","Data":"1bdd3c20ed9f54aeea4bc0a3285fdc243ac71445f8538d3d27f16e154a79b248"}
Nov 28 11:11:00 crc kubenswrapper[4923]: I1128 11:11:00.703201 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-7j5p2" event={"ID":"d25bad00-52c9-449a-a73b-b53b4c4f2577","Type":"ContainerStarted","Data":"1f5fa9e45f5fdd4d9dc5588b460daf6631d088af9aebb2dc6103338dce0f695e"}
Nov 28 11:11:00 crc kubenswrapper[4923]: I1128 11:11:00.733390 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-l7sqm" podStartSLOduration=120.733371195 podStartE2EDuration="2m0.733371195s" podCreationTimestamp="2025-11-28 11:09:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:11:00.732317915 +0000 UTC m=+139.861002135" watchObservedRunningTime="2025-11-28 11:11:00.733371195 +0000 UTC m=+139.862055405"
11:09:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:11:00.732317915 +0000 UTC m=+139.861002135" watchObservedRunningTime="2025-11-28 11:11:00.733371195 +0000 UTC m=+139.862055405" Nov 28 11:11:00 crc kubenswrapper[4923]: I1128 11:11:00.734442 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-g855d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " pod="openshift-image-registry/image-registry-697d97f7c8-g855d" Nov 28 11:11:00 crc kubenswrapper[4923]: E1128 11:11:00.734770 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 11:11:01.234758734 +0000 UTC m=+140.363442944 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-g855d" (UID: "6f176857-50d2-41c7-8237-961e330c629d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 11:11:00 crc kubenswrapper[4923]: I1128 11:11:00.738342 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-8b2zb" event={"ID":"641c9aa3-7da2-4122-9410-cd46d1733143","Type":"ContainerStarted","Data":"4cd5b7597b8c2377c33ec3a15061b583ca776febf439f24196132e977b386102"} Nov 28 11:11:00 crc kubenswrapper[4923]: I1128 11:11:00.755078 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-9x5dg" event={"ID":"ce29e4b8-83fb-402d-a969-efa9106fdf29","Type":"ContainerStarted","Data":"b95f685503e2953e6b0b798860435a947916c319954a5808cbbd668b71374bd7"} Nov 28 11:11:00 crc kubenswrapper[4923]: I1128 11:11:00.797459 4923 generic.go:334] "Generic (PLEG): container finished" podID="37ddbfb0-c042-460d-b772-9cdd214a79a1" containerID="8bc2302007d479824c9671aa77f1e77973047e266b3ebaab0a8ab343ad31fa7f" exitCode=0 Nov 28 11:11:00 crc kubenswrapper[4923]: I1128 11:11:00.797743 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-lhbv8" event={"ID":"37ddbfb0-c042-460d-b772-9cdd214a79a1","Type":"ContainerDied","Data":"8bc2302007d479824c9671aa77f1e77973047e266b3ebaab0a8ab343ad31fa7f"} Nov 28 11:11:00 crc kubenswrapper[4923]: I1128 11:11:00.807366 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-m6kq9" event={"ID":"4b125207-13c8-4142-86d1-99645442eddf","Type":"ContainerStarted","Data":"64d0b9cbba895a73bea9a056b80e7c708da14f9db875587b41451eca764df1a3"} Nov 28 11:11:00 crc kubenswrapper[4923]: I1128 11:11:00.838320 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 11:11:00 crc kubenswrapper[4923]: 
I1128 11:11:00.838861 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-lswhk" event={"ID":"bc404fb9-c265-4265-84e8-e3dd111fae9a","Type":"ContainerStarted","Data":"dbf363100d2155ec3c3a027a44a9dfc11953264f80e04b558e022f970c64a472"} Nov 28 11:11:00 crc kubenswrapper[4923]: E1128 11:11:00.839922 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 11:11:01.339904667 +0000 UTC m=+140.468588867 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 11:11:00 crc kubenswrapper[4923]: I1128 11:11:00.840107 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-lswhk" Nov 28 11:11:00 crc kubenswrapper[4923]: I1128 11:11:00.851196 4923 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-lswhk container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.34:8080/healthz\": dial tcp 10.217.0.34:8080: connect: connection refused" start-of-body= Nov 28 11:11:00 crc kubenswrapper[4923]: I1128 11:11:00.851265 4923 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-lswhk" podUID="bc404fb9-c265-4265-84e8-e3dd111fae9a" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.34:8080/healthz\": dial tcp 10.217.0.34:8080: connect: connection refused" Nov 28 11:11:00 crc kubenswrapper[4923]: I1128 11:11:00.852280 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-m4cv2" event={"ID":"beb2a474-a9eb-4a5a-915b-003a2654a0d0","Type":"ContainerStarted","Data":"e5a2378d059d1ec6115fd9019605fd9f78670b870c3073329108c43317266d1d"} Nov 28 11:11:00 crc kubenswrapper[4923]: I1128 11:11:00.855688 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l4xf8" event={"ID":"84ce6a6f-d3d1-4ef9-8ca5-79dfa714a2b4","Type":"ContainerStarted","Data":"dad33e415182a38bca3d54483b7850c4ce969dbd825a76d3fa0a767ce24adfd8"} Nov 28 11:11:00 crc kubenswrapper[4923]: I1128 11:11:00.860249 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fv729" Nov 28 11:11:00 crc kubenswrapper[4923]: I1128 11:11:00.863584 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-kwrnm" event={"ID":"260bf9ee-f05f-4598-9bf9-de7ed2d1723f","Type":"ContainerStarted","Data":"6ee2bfd7a4cdf0567fb256c00ebfb37e68283e65a2c4bb89802ad11e9e4dc873"} Nov 28 11:11:00 crc kubenswrapper[4923]: I1128 11:11:00.871994 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-m6kq9" podStartSLOduration=120.871981954 podStartE2EDuration="2m0.871981954s" podCreationTimestamp="2025-11-28 11:09:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:11:00.871405608 +0000 UTC m=+140.000089818" watchObservedRunningTime="2025-11-28 11:11:00.871981954 +0000 UTC m=+140.000666164" Nov 28 11:11:00 crc kubenswrapper[4923]: I1128 11:11:00.875369 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-k9zcz" event={"ID":"74a3fc68-fb6b-4070-85ea-a4e70aa6406b","Type":"ContainerStarted","Data":"52b886d7ef36df47e1e662576315b308cce3489253bf8e31ec307e6b2f815b69"} Nov 28 11:11:00 crc kubenswrapper[4923]: I1128 11:11:00.900520 4923 generic.go:334] "Generic (PLEG): container finished" podID="e7f08880-f8db-4170-8d1d-1bccb2df10f4" containerID="7d2aa0b10bc957f35df054435f0b3c7f840bf4bb3ff168fca5e31bbadeaaacc3" exitCode=0 Nov 28 11:11:00 crc kubenswrapper[4923]: I1128 11:11:00.900575 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-mqbzf" event={"ID":"e7f08880-f8db-4170-8d1d-1bccb2df10f4","Type":"ContainerDied","Data":"7d2aa0b10bc957f35df054435f0b3c7f840bf4bb3ff168fca5e31bbadeaaacc3"} Nov 28 11:11:00 crc kubenswrapper[4923]: I1128 11:11:00.944194 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-ckhbm" podStartSLOduration=121.944179196 podStartE2EDuration="2m1.944179196s" podCreationTimestamp="2025-11-28 11:08:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:11:00.943679402 +0000 UTC m=+140.072363612" watchObservedRunningTime="2025-11-28 11:11:00.944179196 +0000 UTC m=+140.072863406" Nov 28 11:11:00 crc kubenswrapper[4923]: I1128 11:11:00.944612 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-lswhk" podStartSLOduration=120.944607598 podStartE2EDuration="2m0.944607598s" podCreationTimestamp="2025-11-28 11:09:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:11:00.8969234 +0000 UTC m=+140.025607600" watchObservedRunningTime="2025-11-28 11:11:00.944607598 +0000 UTC m=+140.073291808" Nov 28 11:11:00 crc kubenswrapper[4923]: I1128 11:11:00.945757 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-g855d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " 
pod="openshift-image-registry/image-registry-697d97f7c8-g855d" Nov 28 11:11:00 crc kubenswrapper[4923]: E1128 11:11:00.948820 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 11:11:01.448809857 +0000 UTC m=+140.577494067 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-g855d" (UID: "6f176857-50d2-41c7-8237-961e330c629d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 11:11:01 crc kubenswrapper[4923]: I1128 11:11:01.003115 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zfzrm" event={"ID":"00c5c14d-d989-41a8-8447-c1a9c47426a2","Type":"ContainerStarted","Data":"7a5cb11c401ef501ebca6f1cf955a539d99ad63fdf99f70d10be872f6bacae3c"} Nov 28 11:11:01 crc kubenswrapper[4923]: I1128 11:11:01.014213 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-k9zcz" podStartSLOduration=121.014200436 podStartE2EDuration="2m1.014200436s" podCreationTimestamp="2025-11-28 11:09:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:11:01.01399462 +0000 UTC m=+140.142678830" watchObservedRunningTime="2025-11-28 11:11:01.014200436 +0000 UTC m=+140.142884646" Nov 28 11:11:01 crc kubenswrapper[4923]: I1128 11:11:01.016690 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-cbtlt" event={"ID":"4926f4a2-0ee6-444b-a113-f6ee1d162d72","Type":"ContainerStarted","Data":"e3c5d154b8c55b19163ead2f0111b66836dc5f3df9915579bfce751c70970912"} Nov 28 11:11:01 crc kubenswrapper[4923]: E1128 11:11:01.051377 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 11:11:01.551348236 +0000 UTC m=+140.680032446 (durationBeforeRetry 500ms). 
Nov 28 11:11:01 crc kubenswrapper[4923]: I1128 11:11:01.050891 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 11:11:01 crc kubenswrapper[4923]: I1128 11:11:01.056289 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-g855d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " pod="openshift-image-registry/image-registry-697d97f7c8-g855d"
Nov 28 11:11:01 crc kubenswrapper[4923]: E1128 11:11:01.058179 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 11:11:01.558161829 +0000 UTC m=+140.686846039 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-g855d" (UID: "6f176857-50d2-41c7-8237-961e330c629d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 11:11:01 crc kubenswrapper[4923]: I1128 11:11:01.070587 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-fd2jt" event={"ID":"7638a2ec-d85c-456d-9d1b-9e56d83eae4b","Type":"ContainerStarted","Data":"a910293280a22eabf603939f9c5402a655c4f9bdbb07f224f3cf4157fdd3179a"}
Nov 28 11:11:01 crc kubenswrapper[4923]: I1128 11:11:01.126364 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-2vsdg" event={"ID":"aa97fc63-7e09-4217-9fb9-78fca4703f04","Type":"ContainerStarted","Data":"075d31152141784f0f1c4ea6ee5114d374239a290609a5f8e4e57c4b14ab6981"}
Nov 28 11:11:01 crc kubenswrapper[4923]: I1128 11:11:01.129887 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-cbtlt" podStartSLOduration=122.129871676 podStartE2EDuration="2m2.129871676s" podCreationTimestamp="2025-11-28 11:08:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:11:01.129855196 +0000 UTC m=+140.258539406" watchObservedRunningTime="2025-11-28 11:11:01.129871676 +0000 UTC m=+140.258555886"
Nov 28 11:11:01 crc kubenswrapper[4923]: I1128 11:11:01.131593 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-s5wfn" podStartSLOduration=121.131586855 podStartE2EDuration="2m1.131586855s" podCreationTimestamp="2025-11-28 11:09:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:11:01.079604525 +0000 UTC m=+140.208288735" watchObservedRunningTime="2025-11-28 11:11:01.131586855 +0000 UTC m=+140.260271065"
Nov 28 11:11:01 crc kubenswrapper[4923]: I1128 11:11:01.164455 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 11:11:01 crc kubenswrapper[4923]: E1128 11:11:01.164615 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 11:11:01.664590158 +0000 UTC m=+140.793274368 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 11:11:01 crc kubenswrapper[4923]: I1128 11:11:01.164700 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-g855d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " pod="openshift-image-registry/image-registry-697d97f7c8-g855d"
Nov 28 11:11:01 crc kubenswrapper[4923]: E1128 11:11:01.165116 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 11:11:01.665099322 +0000 UTC m=+140.793783532 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-g855d" (UID: "6f176857-50d2-41c7-8237-961e330c629d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 11:11:01 crc kubenswrapper[4923]: I1128 11:11:01.235631 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-dxdgg" podStartSLOduration=121.235613186 podStartE2EDuration="2m1.235613186s" podCreationTimestamp="2025-11-28 11:09:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:11:01.201343397 +0000 UTC m=+140.330027607" watchObservedRunningTime="2025-11-28 11:11:01.235613186 +0000 UTC m=+140.364297396"
Nov 28 11:11:01 crc kubenswrapper[4923]: I1128 11:11:01.236686 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-2vsdg" podStartSLOduration=122.236679266 podStartE2EDuration="2m2.236679266s" podCreationTimestamp="2025-11-28 11:08:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:11:01.236673446 +0000 UTC m=+140.365357656" watchObservedRunningTime="2025-11-28 11:11:01.236679266 +0000 UTC m=+140.365363476"
Nov 28 11:11:01 crc kubenswrapper[4923]: I1128 11:11:01.273297 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 11:11:01 crc kubenswrapper[4923]: E1128 11:11:01.274517 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 11:11:01.774503126 +0000 UTC m=+140.903187336 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 11:11:01 crc kubenswrapper[4923]: I1128 11:11:01.375608 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-g855d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " pod="openshift-image-registry/image-registry-697d97f7c8-g855d"
Nov 28 11:11:01 crc kubenswrapper[4923]: E1128 11:11:01.375888 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 11:11:01.875876901 +0000 UTC m=+141.004561111 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-g855d" (UID: "6f176857-50d2-41c7-8237-961e330c629d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 11:11:01 crc kubenswrapper[4923]: I1128 11:11:01.432196 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-qkkh6"]
Nov 28 11:11:01 crc kubenswrapper[4923]: I1128 11:11:01.433069 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-qkkh6"
Nov 28 11:11:01 crc kubenswrapper[4923]: I1128 11:11:01.444976 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh"
Nov 28 11:11:01 crc kubenswrapper[4923]: I1128 11:11:01.479335 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 11:11:01 crc kubenswrapper[4923]: E1128 11:11:01.479543 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 11:11:01.979510892 +0000 UTC m=+141.108195102 (durationBeforeRetry 500ms).
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 11:11:01 crc kubenswrapper[4923]: I1128 11:11:01.480114 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-g855d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " pod="openshift-image-registry/image-registry-697d97f7c8-g855d" Nov 28 11:11:01 crc kubenswrapper[4923]: E1128 11:11:01.480458 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 11:11:01.980451178 +0000 UTC m=+141.109135388 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-g855d" (UID: "6f176857-50d2-41c7-8237-961e330c629d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 11:11:01 crc kubenswrapper[4923]: I1128 11:11:01.498985 4923 patch_prober.go:28] interesting pod/router-default-5444994796-dfffg container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 28 11:11:01 crc kubenswrapper[4923]: [-]has-synced failed: reason withheld Nov 28 11:11:01 crc kubenswrapper[4923]: [+]process-running ok Nov 28 11:11:01 crc kubenswrapper[4923]: healthz check failed Nov 28 11:11:01 crc kubenswrapper[4923]: I1128 11:11:01.499192 4923 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-dfffg" podUID="a49569a7-dda0-4856-816e-296642ddbdff" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 11:11:01 crc kubenswrapper[4923]: I1128 11:11:01.534979 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-qkkh6"] Nov 28 11:11:01 crc kubenswrapper[4923]: I1128 11:11:01.593155 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 11:11:01 crc kubenswrapper[4923]: E1128 11:11:01.593273 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 11:11:02.093257268 +0000 UTC m=+141.221941478 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 11:11:01 crc kubenswrapper[4923]: I1128 11:11:01.595155 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wsdls\" (UniqueName: \"kubernetes.io/projected/39eebecc-004d-445a-ac63-fad7bc311127-kube-api-access-wsdls\") pod \"redhat-operators-qkkh6\" (UID: \"39eebecc-004d-445a-ac63-fad7bc311127\") " pod="openshift-marketplace/redhat-operators-qkkh6" Nov 28 11:11:01 crc kubenswrapper[4923]: I1128 11:11:01.595270 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/39eebecc-004d-445a-ac63-fad7bc311127-utilities\") pod \"redhat-operators-qkkh6\" (UID: \"39eebecc-004d-445a-ac63-fad7bc311127\") " pod="openshift-marketplace/redhat-operators-qkkh6" Nov 28 11:11:01 crc kubenswrapper[4923]: I1128 11:11:01.595362 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/39eebecc-004d-445a-ac63-fad7bc311127-catalog-content\") pod \"redhat-operators-qkkh6\" (UID: \"39eebecc-004d-445a-ac63-fad7bc311127\") " pod="openshift-marketplace/redhat-operators-qkkh6" Nov 28 11:11:01 crc kubenswrapper[4923]: I1128 11:11:01.595500 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-g855d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " pod="openshift-image-registry/image-registry-697d97f7c8-g855d" Nov 28 11:11:01 crc kubenswrapper[4923]: E1128 11:11:01.595836 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 11:11:02.0958284 +0000 UTC m=+141.224512610 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-g855d" (UID: "6f176857-50d2-41c7-8237-961e330c629d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 11:11:01 crc kubenswrapper[4923]: I1128 11:11:01.700823 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 11:11:01 crc kubenswrapper[4923]: I1128 11:11:01.701158 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wsdls\" (UniqueName: \"kubernetes.io/projected/39eebecc-004d-445a-ac63-fad7bc311127-kube-api-access-wsdls\") pod \"redhat-operators-qkkh6\" (UID: \"39eebecc-004d-445a-ac63-fad7bc311127\") " pod="openshift-marketplace/redhat-operators-qkkh6" Nov 28 11:11:01 crc kubenswrapper[4923]: I1128 11:11:01.701206 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/39eebecc-004d-445a-ac63-fad7bc311127-utilities\") pod \"redhat-operators-qkkh6\" (UID: \"39eebecc-004d-445a-ac63-fad7bc311127\") " pod="openshift-marketplace/redhat-operators-qkkh6" Nov 28 11:11:01 crc kubenswrapper[4923]: I1128 11:11:01.701227 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/39eebecc-004d-445a-ac63-fad7bc311127-catalog-content\") pod \"redhat-operators-qkkh6\" (UID: \"39eebecc-004d-445a-ac63-fad7bc311127\") " pod="openshift-marketplace/redhat-operators-qkkh6" Nov 28 11:11:01 crc kubenswrapper[4923]: I1128 11:11:01.701607 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/39eebecc-004d-445a-ac63-fad7bc311127-catalog-content\") pod \"redhat-operators-qkkh6\" (UID: \"39eebecc-004d-445a-ac63-fad7bc311127\") " pod="openshift-marketplace/redhat-operators-qkkh6" Nov 28 11:11:01 crc kubenswrapper[4923]: E1128 11:11:01.701676 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 11:11:02.201664043 +0000 UTC m=+141.330348253 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 11:11:01 crc kubenswrapper[4923]: I1128 11:11:01.709132 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/39eebecc-004d-445a-ac63-fad7bc311127-utilities\") pod \"redhat-operators-qkkh6\" (UID: \"39eebecc-004d-445a-ac63-fad7bc311127\") " pod="openshift-marketplace/redhat-operators-qkkh6" Nov 28 11:11:01 crc kubenswrapper[4923]: I1128 11:11:01.721403 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wsdls\" (UniqueName: \"kubernetes.io/projected/39eebecc-004d-445a-ac63-fad7bc311127-kube-api-access-wsdls\") pod \"redhat-operators-qkkh6\" (UID: \"39eebecc-004d-445a-ac63-fad7bc311127\") " pod="openshift-marketplace/redhat-operators-qkkh6" Nov 28 11:11:01 crc kubenswrapper[4923]: I1128 11:11:01.784293 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-qkkh6" Nov 28 11:11:01 crc kubenswrapper[4923]: I1128 11:11:01.802334 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-g855d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " pod="openshift-image-registry/image-registry-697d97f7c8-g855d" Nov 28 11:11:01 crc kubenswrapper[4923]: E1128 11:11:01.802677 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 11:11:02.302666439 +0000 UTC m=+141.431350649 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-g855d" (UID: "6f176857-50d2-41c7-8237-961e330c629d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 11:11:01 crc kubenswrapper[4923]: I1128 11:11:01.846167 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-h4kzb"] Nov 28 11:11:01 crc kubenswrapper[4923]: I1128 11:11:01.859097 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-h4kzb" Nov 28 11:11:01 crc kubenswrapper[4923]: I1128 11:11:01.875378 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-h4kzb"] Nov 28 11:11:01 crc kubenswrapper[4923]: I1128 11:11:01.903174 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 11:11:01 crc kubenswrapper[4923]: E1128 11:11:01.903792 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 11:11:02.403778968 +0000 UTC m=+141.532463178 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 11:11:01 crc kubenswrapper[4923]: I1128 11:11:01.938254 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-n2qp8"] Nov 28 11:11:02 crc kubenswrapper[4923]: I1128 11:11:02.005459 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v77lg\" (UniqueName: \"kubernetes.io/projected/4fa3aef4-051e-48bb-91a3-bd8bd4c2befb-kube-api-access-v77lg\") pod \"redhat-operators-h4kzb\" (UID: \"4fa3aef4-051e-48bb-91a3-bd8bd4c2befb\") " pod="openshift-marketplace/redhat-operators-h4kzb" Nov 28 11:11:02 crc kubenswrapper[4923]: I1128 11:11:02.005552 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-g855d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " pod="openshift-image-registry/image-registry-697d97f7c8-g855d" Nov 28 11:11:02 crc kubenswrapper[4923]: I1128 11:11:02.005580 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4fa3aef4-051e-48bb-91a3-bd8bd4c2befb-utilities\") pod \"redhat-operators-h4kzb\" (UID: \"4fa3aef4-051e-48bb-91a3-bd8bd4c2befb\") " pod="openshift-marketplace/redhat-operators-h4kzb" Nov 28 11:11:02 crc kubenswrapper[4923]: I1128 11:11:02.005602 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4fa3aef4-051e-48bb-91a3-bd8bd4c2befb-catalog-content\") pod \"redhat-operators-h4kzb\" (UID: \"4fa3aef4-051e-48bb-91a3-bd8bd4c2befb\") " pod="openshift-marketplace/redhat-operators-h4kzb" Nov 28 11:11:02 crc kubenswrapper[4923]: E1128 11:11:02.005866 4923 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 11:11:02.505856194 +0000 UTC m=+141.634540404 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-g855d" (UID: "6f176857-50d2-41c7-8237-961e330c629d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 11:11:02 crc kubenswrapper[4923]: I1128 11:11:02.106318 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 11:11:02 crc kubenswrapper[4923]: I1128 11:11:02.106712 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v77lg\" (UniqueName: \"kubernetes.io/projected/4fa3aef4-051e-48bb-91a3-bd8bd4c2befb-kube-api-access-v77lg\") pod \"redhat-operators-h4kzb\" (UID: \"4fa3aef4-051e-48bb-91a3-bd8bd4c2befb\") " pod="openshift-marketplace/redhat-operators-h4kzb" Nov 28 11:11:02 crc kubenswrapper[4923]: I1128 11:11:02.106807 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4fa3aef4-051e-48bb-91a3-bd8bd4c2befb-utilities\") pod \"redhat-operators-h4kzb\" (UID: \"4fa3aef4-051e-48bb-91a3-bd8bd4c2befb\") " pod="openshift-marketplace/redhat-operators-h4kzb" Nov 28 11:11:02 crc kubenswrapper[4923]: I1128 11:11:02.106845 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4fa3aef4-051e-48bb-91a3-bd8bd4c2befb-catalog-content\") pod \"redhat-operators-h4kzb\" (UID: \"4fa3aef4-051e-48bb-91a3-bd8bd4c2befb\") " pod="openshift-marketplace/redhat-operators-h4kzb" Nov 28 11:11:02 crc kubenswrapper[4923]: I1128 11:11:02.107243 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4fa3aef4-051e-48bb-91a3-bd8bd4c2befb-catalog-content\") pod \"redhat-operators-h4kzb\" (UID: \"4fa3aef4-051e-48bb-91a3-bd8bd4c2befb\") " pod="openshift-marketplace/redhat-operators-h4kzb" Nov 28 11:11:02 crc kubenswrapper[4923]: E1128 11:11:02.107313 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 11:11:02.607299472 +0000 UTC m=+141.735983683 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 11:11:02 crc kubenswrapper[4923]: I1128 11:11:02.107737 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4fa3aef4-051e-48bb-91a3-bd8bd4c2befb-utilities\") pod \"redhat-operators-h4kzb\" (UID: \"4fa3aef4-051e-48bb-91a3-bd8bd4c2befb\") " pod="openshift-marketplace/redhat-operators-h4kzb" Nov 28 11:11:02 crc kubenswrapper[4923]: I1128 11:11:02.107795 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-fv729"] Nov 28 11:11:02 crc kubenswrapper[4923]: I1128 11:11:02.151606 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v77lg\" (UniqueName: \"kubernetes.io/projected/4fa3aef4-051e-48bb-91a3-bd8bd4c2befb-kube-api-access-v77lg\") pod \"redhat-operators-h4kzb\" (UID: \"4fa3aef4-051e-48bb-91a3-bd8bd4c2befb\") " pod="openshift-marketplace/redhat-operators-h4kzb" Nov 28 11:11:02 crc kubenswrapper[4923]: I1128 11:11:02.193362 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-m4cv2" event={"ID":"beb2a474-a9eb-4a5a-915b-003a2654a0d0","Type":"ContainerStarted","Data":"c211bd1ef392a3443aa4cbca32a6eb54d093475c749f1a47cb91cbdd8076026d"} Nov 28 11:11:02 crc kubenswrapper[4923]: I1128 11:11:02.193631 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-m4cv2" event={"ID":"beb2a474-a9eb-4a5a-915b-003a2654a0d0","Type":"ContainerStarted","Data":"6f1f5cf9ca85b6c2a6e4f8f08688e0eacbe21f7f1ef283de6d2db181d4620380"} Nov 28 11:11:02 crc kubenswrapper[4923]: I1128 11:11:02.193880 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-m4cv2" Nov 28 11:11:02 crc kubenswrapper[4923]: I1128 11:11:02.208310 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-g855d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " pod="openshift-image-registry/image-registry-697d97f7c8-g855d" Nov 28 11:11:02 crc kubenswrapper[4923]: E1128 11:11:02.208620 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 11:11:02.708608897 +0000 UTC m=+141.837293107 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-g855d" (UID: "6f176857-50d2-41c7-8237-961e330c629d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 11:11:02 crc kubenswrapper[4923]: I1128 11:11:02.216394 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-m6kq9" event={"ID":"4b125207-13c8-4142-86d1-99645442eddf","Type":"ContainerStarted","Data":"84a5f5db923005eeae62a6ba8c434da768381b7374433e778a16639c71f2b516"} Nov 28 11:11:02 crc kubenswrapper[4923]: I1128 11:11:02.220218 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-m4cv2" podStartSLOduration=10.220206655 podStartE2EDuration="10.220206655s" podCreationTimestamp="2025-11-28 11:10:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:11:02.218117796 +0000 UTC m=+141.346802006" watchObservedRunningTime="2025-11-28 11:11:02.220206655 +0000 UTC m=+141.348890865" Nov 28 11:11:02 crc kubenswrapper[4923]: I1128 11:11:02.220804 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-h4kzb" Nov 28 11:11:02 crc kubenswrapper[4923]: I1128 11:11:02.258546 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-dxdgg" event={"ID":"14ef552e-14c1-49e6-b06d-0736e2a3ed73","Type":"ContainerStarted","Data":"6551b308a5cb38ef75ae33a2ce7703c60ecded99c85fbd0007e1f51fc4348c63"} Nov 28 11:11:02 crc kubenswrapper[4923]: I1128 11:11:02.287285 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-j6dnf" event={"ID":"889057c8-1eb2-4829-b1d5-a906b88eb68c","Type":"ContainerStarted","Data":"8186dab1d35fecea5301ccaaf7369fc170f2cb9a1387c15ac4e9529b457e7d13"} Nov 28 11:11:02 crc kubenswrapper[4923]: I1128 11:11:02.325869 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 11:11:02 crc kubenswrapper[4923]: E1128 11:11:02.327126 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 11:11:02.827105248 +0000 UTC m=+141.955789458 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 11:11:02 crc kubenswrapper[4923]: I1128 11:11:02.350401 4923 generic.go:334] "Generic (PLEG): container finished" podID="ac7e8c4a-3957-4b3f-a6ce-968bb42f2a89" containerID="73a41071dd19a6a0e8a0f6f1a9488c6407d59ab295ef40752949d8cabd9b61ba" exitCode=0 Nov 28 11:11:02 crc kubenswrapper[4923]: I1128 11:11:02.350512 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405460-778cj" event={"ID":"ac7e8c4a-3957-4b3f-a6ce-968bb42f2a89","Type":"ContainerDied","Data":"73a41071dd19a6a0e8a0f6f1a9488c6407d59ab295ef40752949d8cabd9b61ba"} Nov 28 11:11:02 crc kubenswrapper[4923]: I1128 11:11:02.375919 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-hcztj" event={"ID":"31885971-a674-4f47-999b-d7e5435f34d0","Type":"ContainerStarted","Data":"a04f5d90ea8028edd417207c7106e8d69185d2fccaa6de0c90b74ada7cc4f781"} Nov 28 11:11:02 crc kubenswrapper[4923]: I1128 11:11:02.375983 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-hcztj" event={"ID":"31885971-a674-4f47-999b-d7e5435f34d0","Type":"ContainerStarted","Data":"ab0963723a561ec3239a9347f2489b2b9c716048c58667a7ad2076929eac7e64"} Nov 28 11:11:02 crc kubenswrapper[4923]: I1128 11:11:02.385690 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-cnw7p" event={"ID":"3de372b7-d84f-46f7-b773-255e32d0e882","Type":"ContainerStarted","Data":"6a15e4dd075e05d66b5223dd562b0dba351edfaa946912ab716e9e0ab37ef488"} Nov 28 11:11:02 crc kubenswrapper[4923]: I1128 11:11:02.385730 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-cnw7p" event={"ID":"3de372b7-d84f-46f7-b773-255e32d0e882","Type":"ContainerStarted","Data":"ae6c216321a00e128c23e3d1f71db3f2227d81b6481d9e3a41e4694a0d8f5b52"} Nov 28 11:11:02 crc kubenswrapper[4923]: I1128 11:11:02.386439 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-cnw7p" Nov 28 11:11:02 crc kubenswrapper[4923]: I1128 11:11:02.401961 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-ckhbm" event={"ID":"1b9cce3b-92fb-45f2-a81a-4a0a722ed13e","Type":"ContainerStarted","Data":"043ffa4cb7daf30843980717321dfa02b7ef582b39fa10feb2d7c22e08f25b89"} Nov 28 11:11:02 crc kubenswrapper[4923]: I1128 11:11:02.432788 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-g855d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " pod="openshift-image-registry/image-registry-697d97f7c8-g855d" Nov 28 11:11:02 crc kubenswrapper[4923]: E1128 11:11:02.434056 4923 
nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 11:11:02.934039941 +0000 UTC m=+142.062724151 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-g855d" (UID: "6f176857-50d2-41c7-8237-961e330c629d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 11:11:02 crc kubenswrapper[4923]: I1128 11:11:02.439191 4923 generic.go:334] "Generic (PLEG): container finished" podID="84ce6a6f-d3d1-4ef9-8ca5-79dfa714a2b4" containerID="5f84e44d04ae5872ea05bdcaa7e7d623d3e51987cb7a2b80955128fb4344cb39" exitCode=0 Nov 28 11:11:02 crc kubenswrapper[4923]: I1128 11:11:02.439267 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l4xf8" event={"ID":"84ce6a6f-d3d1-4ef9-8ca5-79dfa714a2b4","Type":"ContainerDied","Data":"5f84e44d04ae5872ea05bdcaa7e7d623d3e51987cb7a2b80955128fb4344cb39"} Nov 28 11:11:02 crc kubenswrapper[4923]: I1128 11:11:02.463599 4923 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 11:11:02 crc kubenswrapper[4923]: I1128 11:11:02.480023 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-hcztj" podStartSLOduration=122.480008811 podStartE2EDuration="2m2.480008811s" podCreationTimestamp="2025-11-28 11:09:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:11:02.477443358 +0000 UTC m=+141.606127558" watchObservedRunningTime="2025-11-28 11:11:02.480008811 +0000 UTC m=+141.608693011" Nov 28 11:11:02 crc kubenswrapper[4923]: I1128 11:11:02.482162 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-fd2jt" event={"ID":"7638a2ec-d85c-456d-9d1b-9e56d83eae4b","Type":"ContainerStarted","Data":"27c97717afa24326637bb23ac806ff843879c6264af3d4c696c5612bcc507483"} Nov 28 11:11:02 crc kubenswrapper[4923]: I1128 11:11:02.482966 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-fd2jt" Nov 28 11:11:02 crc kubenswrapper[4923]: I1128 11:11:02.496382 4923 patch_prober.go:28] interesting pod/downloads-7954f5f757-fd2jt container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" start-of-body= Nov 28 11:11:02 crc kubenswrapper[4923]: I1128 11:11:02.496430 4923 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-fd2jt" podUID="7638a2ec-d85c-456d-9d1b-9e56d83eae4b" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" Nov 28 11:11:02 crc kubenswrapper[4923]: I1128 11:11:02.505074 4923 patch_prober.go:28] interesting pod/router-default-5444994796-dfffg container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with 
statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 28 11:11:02 crc kubenswrapper[4923]: [-]has-synced failed: reason withheld Nov 28 11:11:02 crc kubenswrapper[4923]: [+]process-running ok Nov 28 11:11:02 crc kubenswrapper[4923]: healthz check failed Nov 28 11:11:02 crc kubenswrapper[4923]: I1128 11:11:02.505111 4923 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-dfffg" podUID="a49569a7-dda0-4856-816e-296642ddbdff" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 11:11:02 crc kubenswrapper[4923]: I1128 11:11:02.511438 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-s5wfn" event={"ID":"03fb8831-5c58-4bcf-8e9c-d18d0074be3e","Type":"ContainerStarted","Data":"8b20cfe301df127cc703026aee5a16a4590fb08cc51042a6e5f17c9f8fa6dc34"} Nov 28 11:11:02 crc kubenswrapper[4923]: I1128 11:11:02.530059 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-2vsdg" event={"ID":"aa97fc63-7e09-4217-9fb9-78fca4703f04","Type":"ContainerStarted","Data":"f8eb92c4f8e5e324e7afdaa03d4ca4a6396aada381ec16ee9fb2b01c24c67b62"} Nov 28 11:11:02 crc kubenswrapper[4923]: I1128 11:11:02.535096 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 11:11:02 crc kubenswrapper[4923]: E1128 11:11:02.536229 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 11:11:03.03621231 +0000 UTC m=+142.164896520 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 11:11:02 crc kubenswrapper[4923]: I1128 11:11:02.556766 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zfzrm" event={"ID":"00c5c14d-d989-41a8-8447-c1a9c47426a2","Type":"ContainerStarted","Data":"1537dbca4382eb03d1e188f49f0bd0268aeb95e97e2367ef36ad3ffcea3b8014"} Nov 28 11:11:02 crc kubenswrapper[4923]: I1128 11:11:02.586831 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-cnw7p" podStartSLOduration=122.586814291 podStartE2EDuration="2m2.586814291s" podCreationTimestamp="2025-11-28 11:09:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:11:02.529009026 +0000 UTC m=+141.657693236" watchObservedRunningTime="2025-11-28 11:11:02.586814291 +0000 UTC m=+141.715498501" Nov 28 11:11:02 crc kubenswrapper[4923]: I1128 11:11:02.587262 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-kwrnm" event={"ID":"260bf9ee-f05f-4598-9bf9-de7ed2d1723f","Type":"ContainerStarted","Data":"51c778e3e650f0c1d1d945871dcc6fc84287f73cd57dec266b69226af4807a97"} Nov 28 11:11:02 crc kubenswrapper[4923]: I1128 11:11:02.588067 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-kwrnm" Nov 28 11:11:02 crc kubenswrapper[4923]: I1128 11:11:02.606263 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-mqbzf" event={"ID":"e7f08880-f8db-4170-8d1d-1bccb2df10f4","Type":"ContainerStarted","Data":"be309d91d9f2fcb067410a7f82f93685ed6cac41eba827b93c445c65854d6cba"} Nov 28 11:11:02 crc kubenswrapper[4923]: I1128 11:11:02.606451 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-mqbzf" Nov 28 11:11:02 crc kubenswrapper[4923]: I1128 11:11:02.606983 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-kwrnm" Nov 28 11:11:02 crc kubenswrapper[4923]: I1128 11:11:02.620244 4923 generic.go:334] "Generic (PLEG): container finished" podID="e28cae41-ead3-4395-a457-3077c92068ca" containerID="2854176fb098059aedd7457ba3d68d85f2bf68ac49940a384325e1fc29bc5768" exitCode=0 Nov 28 11:11:02 crc kubenswrapper[4923]: I1128 11:11:02.620307 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tdzqr" event={"ID":"e28cae41-ead3-4395-a457-3077c92068ca","Type":"ContainerDied","Data":"2854176fb098059aedd7457ba3d68d85f2bf68ac49940a384325e1fc29bc5768"} Nov 28 11:11:02 crc kubenswrapper[4923]: I1128 11:11:02.637754 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-g855d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " pod="openshift-image-registry/image-registry-697d97f7c8-g855d" Nov 28 11:11:02 crc kubenswrapper[4923]: E1128 11:11:02.640147 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 11:11:03.140135709 +0000 UTC m=+142.268819919 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-g855d" (UID: "6f176857-50d2-41c7-8237-961e330c629d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 11:11:02 crc kubenswrapper[4923]: I1128 11:11:02.650216 4923 generic.go:334] "Generic (PLEG): container finished" podID="cffefe1d-9522-408d-aadf-c688411908e1" containerID="5c3d75468936d3cfc4d995838da04a93d38b91f0ce843c1e6e43509ddd14bae3" exitCode=0 Nov 28 11:11:02 crc kubenswrapper[4923]: I1128 11:11:02.650280 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-46sx6" event={"ID":"cffefe1d-9522-408d-aadf-c688411908e1","Type":"ContainerDied","Data":"5c3d75468936d3cfc4d995838da04a93d38b91f0ce843c1e6e43509ddd14bae3"} Nov 28 11:11:02 crc kubenswrapper[4923]: I1128 11:11:02.667199 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-p7flx" event={"ID":"cdedfd6e-9082-4411-b128-fc9806c67bd3","Type":"ContainerStarted","Data":"72002c716bb9efd84e0ba650c8c77eb6ae7f82c4b4a8780b28653cb26db489d9"} Nov 28 11:11:02 crc kubenswrapper[4923]: I1128 11:11:02.667567 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-qkkh6"] Nov 28 11:11:02 crc kubenswrapper[4923]: I1128 11:11:02.668306 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-p7flx" Nov 28 11:11:02 crc kubenswrapper[4923]: I1128 11:11:02.680301 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-p7flx" Nov 28 11:11:02 crc kubenswrapper[4923]: I1128 11:11:02.695118 4923 generic.go:334] "Generic (PLEG): container finished" podID="f54a8053-81cc-429f-b68e-87a3fd245263" containerID="fc1ec96a53b6cdebdc3648bd3698a09c44264b73cd21633fd6c7d6357f9be39f" exitCode=0 Nov 28 11:11:02 crc kubenswrapper[4923]: I1128 11:11:02.695191 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-js2pf" event={"ID":"f54a8053-81cc-429f-b68e-87a3fd245263","Type":"ContainerDied","Data":"fc1ec96a53b6cdebdc3648bd3698a09c44264b73cd21633fd6c7d6357f9be39f"} Nov 28 11:11:02 crc kubenswrapper[4923]: I1128 11:11:02.697279 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-fd2jt" podStartSLOduration=123.697270144 podStartE2EDuration="2m3.697270144s" podCreationTimestamp="2025-11-28 11:08:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" 
lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:11:02.696376179 +0000 UTC m=+141.825060379" watchObservedRunningTime="2025-11-28 11:11:02.697270144 +0000 UTC m=+141.825954354" Nov 28 11:11:02 crc kubenswrapper[4923]: I1128 11:11:02.703391 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-7j5p2" event={"ID":"d25bad00-52c9-449a-a73b-b53b4c4f2577","Type":"ContainerStarted","Data":"9d3655f5bb4744ffbfa6f1d58b1cce7d9571cd5c1b37a246e7522b9bc4c133db"} Nov 28 11:11:02 crc kubenswrapper[4923]: I1128 11:11:02.703424 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-7j5p2" event={"ID":"d25bad00-52c9-449a-a73b-b53b4c4f2577","Type":"ContainerStarted","Data":"7ded58497d27932bd6f6e084ff07266bfe7bb8f8c10d8613f8ab55c6d46d6217"} Nov 28 11:11:02 crc kubenswrapper[4923]: I1128 11:11:02.710574 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-n2qp8" event={"ID":"a6908a38-5adb-40c3-85e8-730eaa4937ef","Type":"ContainerStarted","Data":"a9e7160ad2781e280da5d5ddc8aa57b3d0b20e676e3672c2fe549f67baf550cc"} Nov 28 11:11:02 crc kubenswrapper[4923]: I1128 11:11:02.728691 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-k9zcz" event={"ID":"74a3fc68-fb6b-4070-85ea-a4e70aa6406b","Type":"ContainerStarted","Data":"2c831ae9c6c88d973f4d0f122fb1fdb873f4b4dbcbb7ad85998227dafe2992b3"} Nov 28 11:11:02 crc kubenswrapper[4923]: I1128 11:11:02.740175 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 11:11:02 crc kubenswrapper[4923]: E1128 11:11:02.740862 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 11:11:03.240848006 +0000 UTC m=+142.369532216 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 11:11:02 crc kubenswrapper[4923]: I1128 11:11:02.744848 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-44p4v" event={"ID":"9bf3f7ca-e9dc-4c5a-9d6d-7622e7c545ba","Type":"ContainerStarted","Data":"bec2bb37a02c358ebf061a6d8cccb614a1a41eae6bc51932c7dc41dd90528c5e"} Nov 28 11:11:02 crc kubenswrapper[4923]: I1128 11:11:02.751994 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-lswhk" event={"ID":"bc404fb9-c265-4265-84e8-e3dd111fae9a","Type":"ContainerStarted","Data":"00403708fb60cdd5ce416fdf4b557eed99855044ed47656236d9eec393278396"} Nov 28 11:11:02 crc kubenswrapper[4923]: I1128 11:11:02.752461 4923 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-lswhk container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.34:8080/healthz\": dial tcp 10.217.0.34:8080: connect: connection refused" start-of-body= Nov 28 11:11:02 crc kubenswrapper[4923]: I1128 11:11:02.752497 4923 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-lswhk" podUID="bc404fb9-c265-4265-84e8-e3dd111fae9a" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.34:8080/healthz\": dial tcp 10.217.0.34:8080: connect: connection refused" Nov 28 11:11:02 crc kubenswrapper[4923]: I1128 11:11:02.761733 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-9x5dg" event={"ID":"ce29e4b8-83fb-402d-a969-efa9106fdf29","Type":"ContainerStarted","Data":"0245219ca00694b5e8e31d0cbb8b9d8a16a624174e336842ed028d3ab4225aaf"} Nov 28 11:11:02 crc kubenswrapper[4923]: I1128 11:11:02.807633 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-l7sqm" event={"ID":"03781a39-3788-4e8a-9a0d-d97c3fb9e4b3","Type":"ContainerStarted","Data":"7a03b08765d8a35af0e45add3a49638364132f7061a8733060a9152c135a1edb"} Nov 28 11:11:02 crc kubenswrapper[4923]: I1128 11:11:02.841781 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-g855d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " pod="openshift-image-registry/image-registry-697d97f7c8-g855d" Nov 28 11:11:02 crc kubenswrapper[4923]: E1128 11:11:02.844301 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 11:11:03.344290721 +0000 UTC m=+142.472974921 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-g855d" (UID: "6f176857-50d2-41c7-8237-961e330c629d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 11:11:02 crc kubenswrapper[4923]: I1128 11:11:02.869601 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-lhbv8" event={"ID":"37ddbfb0-c042-460d-b772-9cdd214a79a1","Type":"ContainerStarted","Data":"9e108f9ba90f6102396c4829825e57e60f79259641ec9058a6433bf1b42cb8a2"}
Nov 28 11:11:02 crc kubenswrapper[4923]: I1128 11:11:02.877632 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-8b2zb" event={"ID":"641c9aa3-7da2-4122-9410-cd46d1733143","Type":"ContainerStarted","Data":"199b393fc948cc46416ed51259870098aec52cd3f2702b16088431f46a34e680"}
Nov 28 11:11:02 crc kubenswrapper[4923]: I1128 11:11:02.880402 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-cbtlt" event={"ID":"4926f4a2-0ee6-444b-a113-f6ee1d162d72","Type":"ContainerStarted","Data":"76bd3258fa04e9fe60329d75313b5a789f6133072363a84dc0d705ce69b412c6"}
Nov 28 11:11:02 crc kubenswrapper[4923]: I1128 11:11:02.932943 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-kwrnm" podStartSLOduration=122.932915097 podStartE2EDuration="2m2.932915097s" podCreationTimestamp="2025-11-28 11:09:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:11:02.932107684 +0000 UTC m=+142.060791894" watchObservedRunningTime="2025-11-28 11:11:02.932915097 +0000 UTC m=+142.061599297"
Nov 28 11:11:02 crc kubenswrapper[4923]: I1128 11:11:02.943052 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 11:11:02 crc kubenswrapper[4923]: E1128 11:11:02.944063 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 11:11:03.444045282 +0000 UTC m=+142.572729492 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 11:11:02 crc kubenswrapper[4923]: I1128 11:11:02.975270 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-mqbzf" podStartSLOduration=123.975256174 podStartE2EDuration="2m3.975256174s" podCreationTimestamp="2025-11-28 11:08:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:11:02.975251834 +0000 UTC m=+142.103936044" watchObservedRunningTime="2025-11-28 11:11:02.975256174 +0000 UTC m=+142.103940384"
Nov 28 11:11:03 crc kubenswrapper[4923]: I1128 11:11:03.013709 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zfzrm" podStartSLOduration=123.013694971 podStartE2EDuration="2m3.013694971s" podCreationTimestamp="2025-11-28 11:09:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:11:03.008652819 +0000 UTC m=+142.137337029" watchObservedRunningTime="2025-11-28 11:11:03.013694971 +0000 UTC m=+142.142379181"
Nov 28 11:11:03 crc kubenswrapper[4923]: I1128 11:11:03.032367 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-p7flx" podStartSLOduration=123.032348639 podStartE2EDuration="2m3.032348639s" podCreationTimestamp="2025-11-28 11:09:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:11:03.029422806 +0000 UTC m=+142.158107016" watchObservedRunningTime="2025-11-28 11:11:03.032348639 +0000 UTC m=+142.161032849"
Nov 28 11:11:03 crc kubenswrapper[4923]: I1128 11:11:03.045903 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-g855d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " pod="openshift-image-registry/image-registry-697d97f7c8-g855d"
Nov 28 11:11:03 crc kubenswrapper[4923]: E1128 11:11:03.048233 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 11:11:03.548222027 +0000 UTC m=+142.676906227 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-g855d" (UID: "6f176857-50d2-41c7-8237-961e330c629d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 11:11:03 crc kubenswrapper[4923]: I1128 11:11:03.103977 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-8b2zb" podStartSLOduration=11.103959893 podStartE2EDuration="11.103959893s" podCreationTimestamp="2025-11-28 11:10:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:11:03.103696626 +0000 UTC m=+142.232380836" watchObservedRunningTime="2025-11-28 11:11:03.103959893 +0000 UTC m=+142.232644103"
Nov 28 11:11:03 crc kubenswrapper[4923]: I1128 11:11:03.137495 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-44p4v" podStartSLOduration=123.137479721 podStartE2EDuration="2m3.137479721s" podCreationTimestamp="2025-11-28 11:09:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:11:03.131597775 +0000 UTC m=+142.260281985" watchObservedRunningTime="2025-11-28 11:11:03.137479721 +0000 UTC m=+142.266163931"
Nov 28 11:11:03 crc kubenswrapper[4923]: I1128 11:11:03.150362 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 11:11:03 crc kubenswrapper[4923]: E1128 11:11:03.150982 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 11:11:03.650962263 +0000 UTC m=+142.779646473 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 11:11:03 crc kubenswrapper[4923]: I1128 11:11:03.251901 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-g855d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " pod="openshift-image-registry/image-registry-697d97f7c8-g855d"
Nov 28 11:11:03 crc kubenswrapper[4923]: E1128 11:11:03.252209 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 11:11:03.752197285 +0000 UTC m=+142.880881495 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-g855d" (UID: "6f176857-50d2-41c7-8237-961e330c629d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 11:11:03 crc kubenswrapper[4923]: I1128 11:11:03.265281 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-7j5p2" podStartSLOduration=123.265264644 podStartE2EDuration="2m3.265264644s" podCreationTimestamp="2025-11-28 11:09:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:11:03.210496216 +0000 UTC m=+142.339180426" watchObservedRunningTime="2025-11-28 11:11:03.265264644 +0000 UTC m=+142.393948854"
Nov 28 11:11:03 crc kubenswrapper[4923]: I1128 11:11:03.280241 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-h4kzb"]
Nov 28 11:11:03 crc kubenswrapper[4923]: W1128 11:11:03.307912 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4fa3aef4_051e_48bb_91a3_bd8bd4c2befb.slice/crio-dfd0457d174dcbe33afcc6f20723ef67e6f884111417ff58f5eef940dcee9af2 WatchSource:0}: Error finding container dfd0457d174dcbe33afcc6f20723ef67e6f884111417ff58f5eef940dcee9af2: Status 404 returned error can't find the container with id dfd0457d174dcbe33afcc6f20723ef67e6f884111417ff58f5eef940dcee9af2
Nov 28 11:11:03 crc kubenswrapper[4923]: I1128 11:11:03.361535 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 11:11:03 crc kubenswrapper[4923]: E1128 11:11:03.362142 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 11:11:03.862127403 +0000 UTC m=+142.990811613 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 11:11:03 crc kubenswrapper[4923]: I1128 11:11:03.463533 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-g855d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " pod="openshift-image-registry/image-registry-697d97f7c8-g855d"
Nov 28 11:11:03 crc kubenswrapper[4923]: E1128 11:11:03.463876 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 11:11:03.96386458 +0000 UTC m=+143.092548790 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-g855d" (UID: "6f176857-50d2-41c7-8237-961e330c629d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 11:11:03 crc kubenswrapper[4923]: I1128 11:11:03.503378 4923 patch_prober.go:28] interesting pod/router-default-5444994796-dfffg container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 28 11:11:03 crc kubenswrapper[4923]: [-]has-synced failed: reason withheld
Nov 28 11:11:03 crc kubenswrapper[4923]: [+]process-running ok
Nov 28 11:11:03 crc kubenswrapper[4923]: healthz check failed
Nov 28 11:11:03 crc kubenswrapper[4923]: I1128 11:11:03.503429 4923 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-dfffg" podUID="a49569a7-dda0-4856-816e-296642ddbdff" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 28 11:11:03 crc kubenswrapper[4923]: I1128 11:11:03.564100 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 11:11:03 crc kubenswrapper[4923]: E1128 11:11:03.564225 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 11:11:04.064201337 +0000 UTC m=+143.192885547 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 11:11:03 crc kubenswrapper[4923]: I1128 11:11:03.564488 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-g855d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " pod="openshift-image-registry/image-registry-697d97f7c8-g855d"
Nov 28 11:11:03 crc kubenswrapper[4923]: E1128 11:11:03.564827 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 11:11:04.064815214 +0000 UTC m=+143.193499424 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-g855d" (UID: "6f176857-50d2-41c7-8237-961e330c629d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 11:11:03 crc kubenswrapper[4923]: I1128 11:11:03.665370 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 11:11:03 crc kubenswrapper[4923]: E1128 11:11:03.665552 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 11:11:04.165526952 +0000 UTC m=+143.294211162 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 11:11:03 crc kubenswrapper[4923]: I1128 11:11:03.666041 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-g855d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " pod="openshift-image-registry/image-registry-697d97f7c8-g855d"
Nov 28 11:11:03 crc kubenswrapper[4923]: E1128 11:11:03.666379 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 11:11:04.166363696 +0000 UTC m=+143.295047906 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-g855d" (UID: "6f176857-50d2-41c7-8237-961e330c629d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 11:11:03 crc kubenswrapper[4923]: I1128 11:11:03.767055 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 11:11:03 crc kubenswrapper[4923]: E1128 11:11:03.767209 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 11:11:04.267179036 +0000 UTC m=+143.395863246 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 11:11:03 crc kubenswrapper[4923]: I1128 11:11:03.767473 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-g855d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " pod="openshift-image-registry/image-registry-697d97f7c8-g855d"
Nov 28 11:11:03 crc kubenswrapper[4923]: E1128 11:11:03.767755 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 11:11:04.267746812 +0000 UTC m=+143.396431022 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-g855d" (UID: "6f176857-50d2-41c7-8237-961e330c629d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 11:11:03 crc kubenswrapper[4923]: I1128 11:11:03.869009 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 11:11:03 crc kubenswrapper[4923]: E1128 11:11:03.869305 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 11:11:04.369276043 +0000 UTC m=+143.497960253 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 11:11:03 crc kubenswrapper[4923]: I1128 11:11:03.968138 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-j6dnf" event={"ID":"889057c8-1eb2-4829-b1d5-a906b88eb68c","Type":"ContainerStarted","Data":"ac66d0678c0cef1e6417075c91288ec74e21bbbbe84cb53f06f8e46c72fc6735"}
Nov 28 11:11:03 crc kubenswrapper[4923]: I1128 11:11:03.974259 4923 generic.go:334] "Generic (PLEG): container finished" podID="4fa3aef4-051e-48bb-91a3-bd8bd4c2befb" containerID="06bfba2db2bce8e36e6fd4169013d80d97f1685ab3f84dfb049ad064eba37e31" exitCode=0
Nov 28 11:11:03 crc kubenswrapper[4923]: I1128 11:11:03.974328 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h4kzb" event={"ID":"4fa3aef4-051e-48bb-91a3-bd8bd4c2befb","Type":"ContainerDied","Data":"06bfba2db2bce8e36e6fd4169013d80d97f1685ab3f84dfb049ad064eba37e31"}
Nov 28 11:11:03 crc kubenswrapper[4923]: I1128 11:11:03.974351 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h4kzb" event={"ID":"4fa3aef4-051e-48bb-91a3-bd8bd4c2befb","Type":"ContainerStarted","Data":"dfd0457d174dcbe33afcc6f20723ef67e6f884111417ff58f5eef940dcee9af2"}
Nov 28 11:11:03 crc kubenswrapper[4923]: I1128 11:11:03.978537 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-g855d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " pod="openshift-image-registry/image-registry-697d97f7c8-g855d"
Nov 28 11:11:03 crc kubenswrapper[4923]: E1128 11:11:03.978984 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 11:11:04.478900433 +0000 UTC m=+143.607584643 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-g855d" (UID: "6f176857-50d2-41c7-8237-961e330c629d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 11:11:04 crc kubenswrapper[4923]: I1128 11:11:04.048886 4923 generic.go:334] "Generic (PLEG): container finished" podID="2db4e8e9-8919-4b13-afef-835484cc865a" containerID="a9e8fe96817b2d49b052dfde01a746e54d90b02d1e6a38914d44dc0c0fd8c72b" exitCode=0
Nov 28 11:11:04 crc kubenswrapper[4923]: I1128 11:11:04.048974 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fv729" event={"ID":"2db4e8e9-8919-4b13-afef-835484cc865a","Type":"ContainerDied","Data":"a9e8fe96817b2d49b052dfde01a746e54d90b02d1e6a38914d44dc0c0fd8c72b"}
Nov 28 11:11:04 crc kubenswrapper[4923]: I1128 11:11:04.048997 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fv729" event={"ID":"2db4e8e9-8919-4b13-afef-835484cc865a","Type":"ContainerStarted","Data":"8b9ef4f3e6c7bd96976f6a30d9cf975dbd7f8b37f3ea266e95d2c52800960112"}
Nov 28 11:11:04 crc kubenswrapper[4923]: I1128 11:11:04.068708 4923 generic.go:334] "Generic (PLEG): container finished" podID="39eebecc-004d-445a-ac63-fad7bc311127" containerID="7f549cf77a8107e54eaa0fb4a5051ee9e7132f25e62fe1ffc8fc94f409ea6d07" exitCode=0
Nov 28 11:11:04 crc kubenswrapper[4923]: I1128 11:11:04.068764 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qkkh6" event={"ID":"39eebecc-004d-445a-ac63-fad7bc311127","Type":"ContainerDied","Data":"7f549cf77a8107e54eaa0fb4a5051ee9e7132f25e62fe1ffc8fc94f409ea6d07"}
Nov 28 11:11:04 crc kubenswrapper[4923]: I1128 11:11:04.068788 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qkkh6" event={"ID":"39eebecc-004d-445a-ac63-fad7bc311127","Type":"ContainerStarted","Data":"1784becd123262fb5a6b0fcc553667cf7c9d05ba4a1119ac1187fcb92e706d0d"}
Nov 28 11:11:04 crc kubenswrapper[4923]: I1128 11:11:04.082575 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 11:11:04 crc kubenswrapper[4923]: E1128 11:11:04.082823 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 11:11:04.582804621 +0000 UTC m=+143.711488831 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 11:11:04 crc kubenswrapper[4923]: I1128 11:11:04.082874 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-g855d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " pod="openshift-image-registry/image-registry-697d97f7c8-g855d"
Nov 28 11:11:04 crc kubenswrapper[4923]: E1128 11:11:04.083905 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 11:11:04.583892931 +0000 UTC m=+143.712577141 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-g855d" (UID: "6f176857-50d2-41c7-8237-961e330c629d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 11:11:04 crc kubenswrapper[4923]: I1128 11:11:04.084875 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-9x5dg" event={"ID":"ce29e4b8-83fb-402d-a969-efa9106fdf29","Type":"ContainerStarted","Data":"8239e50927e9e6a6efb09b27503eba15cfeb1999f23e277750498a6142bd4259"}
Nov 28 11:11:04 crc kubenswrapper[4923]: I1128 11:11:04.089864 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-lhbv8" event={"ID":"37ddbfb0-c042-460d-b772-9cdd214a79a1","Type":"ContainerStarted","Data":"cae4ace1621ddbff60bb9bcf0f03b252f0a34f2b055fcd2d88afc7b823da91ec"}
Nov 28 11:11:04 crc kubenswrapper[4923]: I1128 11:11:04.113949 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-j6dnf" podStartSLOduration=125.11391407 podStartE2EDuration="2m5.11391407s" podCreationTimestamp="2025-11-28 11:08:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:11:04.075194305 +0000 UTC m=+143.203878515" watchObservedRunningTime="2025-11-28 11:11:04.11391407 +0000 UTC m=+143.242598280"
Nov 28 11:11:04 crc kubenswrapper[4923]: I1128 11:11:04.132373 4923 generic.go:334] "Generic (PLEG): container finished" podID="a6908a38-5adb-40c3-85e8-730eaa4937ef" containerID="999e1c5fc7205e272920516843b7a49f17c34dee055b617341851205f64f23d8" exitCode=0
Nov 28 11:11:04 crc kubenswrapper[4923]: I1128 11:11:04.133179 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-n2qp8" event={"ID":"a6908a38-5adb-40c3-85e8-730eaa4937ef","Type":"ContainerDied","Data":"999e1c5fc7205e272920516843b7a49f17c34dee055b617341851205f64f23d8"}
Nov 28 11:11:04 crc kubenswrapper[4923]: I1128 11:11:04.151816 4923 patch_prober.go:28] interesting pod/downloads-7954f5f757-fd2jt container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" start-of-body=
Nov 28 11:11:04 crc kubenswrapper[4923]: I1128 11:11:04.151858 4923 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-fd2jt" podUID="7638a2ec-d85c-456d-9d1b-9e56d83eae4b" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused"
Nov 28 11:11:04 crc kubenswrapper[4923]: I1128 11:11:04.160407 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-lswhk"
Nov 28 11:11:04 crc kubenswrapper[4923]: I1128 11:11:04.184434 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 11:11:04 crc kubenswrapper[4923]: E1128 11:11:04.185514 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 11:11:04.685492954 +0000 UTC m=+143.814177164 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 11:11:04 crc kubenswrapper[4923]: I1128 11:11:04.203290 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-mqbzf"
Nov 28 11:11:04 crc kubenswrapper[4923]: I1128 11:11:04.290086 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-g855d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " pod="openshift-image-registry/image-registry-697d97f7c8-g855d"
Nov 28 11:11:04 crc kubenswrapper[4923]: E1128 11:11:04.293603 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 11:11:04.793590541 +0000 UTC m=+143.922274751 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-g855d" (UID: "6f176857-50d2-41c7-8237-961e330c629d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 11:11:04 crc kubenswrapper[4923]: I1128 11:11:04.384721 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-lhbv8" podStartSLOduration=125.384706207 podStartE2EDuration="2m5.384706207s" podCreationTimestamp="2025-11-28 11:08:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:11:04.347460734 +0000 UTC m=+143.476144944" watchObservedRunningTime="2025-11-28 11:11:04.384706207 +0000 UTC m=+143.513390417"
Nov 28 11:11:04 crc kubenswrapper[4923]: I1128 11:11:04.394793 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 11:11:04 crc kubenswrapper[4923]: E1128 11:11:04.395194 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 11:11:04.895182493 +0000 UTC m=+144.023866703 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 11:11:04 crc kubenswrapper[4923]: I1128 11:11:04.501158 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-g855d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " pod="openshift-image-registry/image-registry-697d97f7c8-g855d"
Nov 28 11:11:04 crc kubenswrapper[4923]: E1128 11:11:04.501458 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 11:11:05.001447998 +0000 UTC m=+144.130132208 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-g855d" (UID: "6f176857-50d2-41c7-8237-961e330c629d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 11:11:04 crc kubenswrapper[4923]: I1128 11:11:04.503674 4923 patch_prober.go:28] interesting pod/router-default-5444994796-dfffg container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 28 11:11:04 crc kubenswrapper[4923]: [-]has-synced failed: reason withheld
Nov 28 11:11:04 crc kubenswrapper[4923]: [+]process-running ok
Nov 28 11:11:04 crc kubenswrapper[4923]: healthz check failed
Nov 28 11:11:04 crc kubenswrapper[4923]: I1128 11:11:04.503751 4923 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-dfffg" podUID="a49569a7-dda0-4856-816e-296642ddbdff" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 28 11:11:04 crc kubenswrapper[4923]: I1128 11:11:04.604477 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 11:11:04 crc kubenswrapper[4923]: E1128 11:11:04.604906 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 11:11:05.104892703 +0000 UTC m=+144.233576903 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 11:11:04 crc kubenswrapper[4923]: I1128 11:11:04.714601 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-g855d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " pod="openshift-image-registry/image-registry-697d97f7c8-g855d"
Nov 28 11:11:04 crc kubenswrapper[4923]: E1128 11:11:04.714973 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 11:11:05.214960785 +0000 UTC m=+144.343644985 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-g855d" (UID: "6f176857-50d2-41c7-8237-961e330c629d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 11:11:04 crc kubenswrapper[4923]: I1128 11:11:04.815353 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 11:11:04 crc kubenswrapper[4923]: E1128 11:11:04.815727 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 11:11:05.315705044 +0000 UTC m=+144.444389254 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 11:11:04 crc kubenswrapper[4923]: I1128 11:11:04.828783 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405460-778cj"
Nov 28 11:11:04 crc kubenswrapper[4923]: I1128 11:11:04.917547 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-g855d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " pod="openshift-image-registry/image-registry-697d97f7c8-g855d"
Nov 28 11:11:04 crc kubenswrapper[4923]: E1128 11:11:04.918078 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 11:11:05.418065777 +0000 UTC m=+144.546749987 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-g855d" (UID: "6f176857-50d2-41c7-8237-961e330c629d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 11:11:05 crc kubenswrapper[4923]: I1128 11:11:05.018650 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ac7e8c4a-3957-4b3f-a6ce-968bb42f2a89-config-volume\") pod \"ac7e8c4a-3957-4b3f-a6ce-968bb42f2a89\" (UID: \"ac7e8c4a-3957-4b3f-a6ce-968bb42f2a89\") "
Nov 28 11:11:05 crc kubenswrapper[4923]: I1128 11:11:05.018784 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 11:11:05 crc kubenswrapper[4923]: I1128 11:11:05.018848 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ac7e8c4a-3957-4b3f-a6ce-968bb42f2a89-secret-volume\") pod \"ac7e8c4a-3957-4b3f-a6ce-968bb42f2a89\" (UID: \"ac7e8c4a-3957-4b3f-a6ce-968bb42f2a89\") "
Nov 28 11:11:05 crc kubenswrapper[4923]: I1128 11:11:05.018888 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mj9gd\" (UniqueName: \"kubernetes.io/projected/ac7e8c4a-3957-4b3f-a6ce-968bb42f2a89-kube-api-access-mj9gd\") pod \"ac7e8c4a-3957-4b3f-a6ce-968bb42f2a89\" (UID: \"ac7e8c4a-3957-4b3f-a6ce-968bb42f2a89\") "
Nov 28 11:11:05 crc kubenswrapper[4923]: E1128 11:11:05.019121 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 11:11:05.519107424 +0000 UTC m=+144.647791634 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 11:11:05 crc kubenswrapper[4923]: I1128 11:11:05.019408 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ac7e8c4a-3957-4b3f-a6ce-968bb42f2a89-config-volume" (OuterVolumeSpecName: "config-volume") pod "ac7e8c4a-3957-4b3f-a6ce-968bb42f2a89" (UID: "ac7e8c4a-3957-4b3f-a6ce-968bb42f2a89"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 11:11:05 crc kubenswrapper[4923]: I1128 11:11:05.047264 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ac7e8c4a-3957-4b3f-a6ce-968bb42f2a89-kube-api-access-mj9gd" (OuterVolumeSpecName: "kube-api-access-mj9gd") pod "ac7e8c4a-3957-4b3f-a6ce-968bb42f2a89" (UID: "ac7e8c4a-3957-4b3f-a6ce-968bb42f2a89"). InnerVolumeSpecName "kube-api-access-mj9gd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 11:11:05 crc kubenswrapper[4923]: I1128 11:11:05.047673 4923 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock"
Nov 28 11:11:05 crc kubenswrapper[4923]: I1128 11:11:05.054977 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ac7e8c4a-3957-4b3f-a6ce-968bb42f2a89-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "ac7e8c4a-3957-4b3f-a6ce-968bb42f2a89" (UID: "ac7e8c4a-3957-4b3f-a6ce-968bb42f2a89"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 11:11:05 crc kubenswrapper[4923]: I1128 11:11:05.064054 4923 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2025-11-28T11:11:05.047698162Z","Handler":null,"Name":""}
Nov 28 11:11:05 crc kubenswrapper[4923]: I1128 11:11:05.071520 4923 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0
Nov 28 11:11:05 crc kubenswrapper[4923]: I1128 11:11:05.071551 4923 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock
Nov 28 11:11:05 crc kubenswrapper[4923]: I1128 11:11:05.123476 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-g855d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " pod="openshift-image-registry/image-registry-697d97f7c8-g855d"
Nov 28 11:11:05 crc kubenswrapper[4923]: I1128 11:11:05.123566 4923 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ac7e8c4a-3957-4b3f-a6ce-968bb42f2a89-secret-volume\") on node \"crc\" DevicePath \"\""
Nov 28 11:11:05 crc kubenswrapper[4923]: I1128 11:11:05.123576 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mj9gd\" (UniqueName: \"kubernetes.io/projected/ac7e8c4a-3957-4b3f-a6ce-968bb42f2a89-kube-api-access-mj9gd\") on node \"crc\" DevicePath \"\""
Nov 28 11:11:05 crc kubenswrapper[4923]: I1128 11:11:05.123586 4923 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ac7e8c4a-3957-4b3f-a6ce-968bb42f2a89-config-volume\") on node \"crc\" DevicePath \"\""
Nov 28 11:11:05 crc kubenswrapper[4923]: I1128 11:11:05.159692 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405460-778cj" event={"ID":"ac7e8c4a-3957-4b3f-a6ce-968bb42f2a89","Type":"ContainerDied","Data":"40e012068a17395f83088fa6ec7f47b618f03ac95cd16a344fdc38705c1c7093"}
Nov 28 11:11:05 crc kubenswrapper[4923]: I1128 11:11:05.159729 4923 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="40e012068a17395f83088fa6ec7f47b618f03ac95cd16a344fdc38705c1c7093"
Nov 28 11:11:05 crc kubenswrapper[4923]: I1128 11:11:05.159746 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405460-778cj"
Nov 28 11:11:05 crc kubenswrapper[4923]: I1128 11:11:05.165065 4923 patch_prober.go:28] interesting pod/downloads-7954f5f757-fd2jt container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" start-of-body=
Nov 28 11:11:05 crc kubenswrapper[4923]: I1128 11:11:05.165116 4923 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-fd2jt" podUID="7638a2ec-d85c-456d-9d1b-9e56d83eae4b" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused"
Nov 28 11:11:05 crc kubenswrapper[4923]: I1128 11:11:05.171986 4923 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Nov 28 11:11:05 crc kubenswrapper[4923]: I1128 11:11:05.172018 4923 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-g855d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-g855d"
Nov 28 11:11:05 crc kubenswrapper[4923]: I1128 11:11:05.389064 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-g855d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " pod="openshift-image-registry/image-registry-697d97f7c8-g855d"
Nov 28 11:11:05 crc kubenswrapper[4923]: I1128 11:11:05.432123 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 11:11:05 crc kubenswrapper[4923]: I1128 11:11:05.436248 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue ""
Nov 28 11:11:05 crc kubenswrapper[4923]: I1128 11:11:05.466610 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-g855d"
Nov 28 11:11:05 crc kubenswrapper[4923]: I1128 11:11:05.497417 4923 patch_prober.go:28] interesting pod/router-default-5444994796-dfffg container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 28 11:11:05 crc kubenswrapper[4923]: [-]has-synced failed: reason withheld
Nov 28 11:11:05 crc kubenswrapper[4923]: [+]process-running ok
Nov 28 11:11:05 crc kubenswrapper[4923]: healthz check failed
Nov 28 11:11:05 crc kubenswrapper[4923]: I1128 11:11:05.497468 4923 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-dfffg" podUID="a49569a7-dda0-4856-816e-296642ddbdff" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 28 11:11:06 crc kubenswrapper[4923]: I1128 11:11:06.226372 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-g855d"]
Nov 28 11:11:06 crc kubenswrapper[4923]: I1128 11:11:06.366246 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-9x5dg" event={"ID":"ce29e4b8-83fb-402d-a969-efa9106fdf29","Type":"ContainerStarted","Data":"072834626f182a6eec6392f1b00c1647cc5ee37e0ae95ce4cc60e6b8b0cf738e"}
Nov 28 11:11:06 crc kubenswrapper[4923]: I1128 11:11:06.366289 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-9x5dg" event={"ID":"ce29e4b8-83fb-402d-a969-efa9106fdf29","Type":"ContainerStarted","Data":"0e65a8a5d97c669e71c72d141fcf1d803fdd38cf640996e45c32967f2167f4d8"}
Nov 28 11:11:06 crc kubenswrapper[4923]: I1128 11:11:06.494413 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-dfffg"
Nov 28 11:11:06 crc kubenswrapper[4923]: I1128 11:11:06.498771 4923 patch_prober.go:28] interesting pod/router-default-5444994796-dfffg container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 28 11:11:06 crc kubenswrapper[4923]: [-]has-synced failed: reason withheld
Nov 28 11:11:06 crc kubenswrapper[4923]: [+]process-running ok
Nov 28 11:11:06 crc kubenswrapper[4923]: healthz check failed
Nov 28 11:11:06 crc kubenswrapper[4923]: I1128 11:11:06.498803 4923 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-dfffg" podUID="a49569a7-dda0-4856-816e-296642ddbdff" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 28 11:11:06 crc kubenswrapper[4923]: I1128 11:11:06.503767 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-lhbv8"
Nov 28 11:11:06 crc kubenswrapper[4923]: I1128 11:11:06.503788 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-lhbv8"
Nov 28 11:11:06 crc kubenswrapper[4923]: I1128 11:11:06.511788 4923 patch_prober.go:28] interesting pod/apiserver-76f77b778f-lhbv8 container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok
Nov 28 11:11:06 crc kubenswrapper[4923]: [+]log ok
Nov 28 11:11:06 crc kubenswrapper[4923]: [+]etcd ok
Nov 28 11:11:06 crc kubenswrapper[4923]: [+]poststarthook/start-apiserver-admission-initializer ok
Nov 28 11:11:06 crc kubenswrapper[4923]: [+]poststarthook/generic-apiserver-start-informers ok
Nov 28 11:11:06 crc kubenswrapper[4923]: [+]poststarthook/max-in-flight-filter ok
Nov 28 11:11:06 crc kubenswrapper[4923]: [+]poststarthook/storage-object-count-tracker-hook ok
Nov 28 11:11:06 crc kubenswrapper[4923]: [+]poststarthook/image.openshift.io-apiserver-caches ok
Nov 28 11:11:06 crc kubenswrapper[4923]: [-]poststarthook/authorization.openshift.io-bootstrapclusterroles failed: reason withheld
Nov 28 11:11:06 crc kubenswrapper[4923]: [+]poststarthook/authorization.openshift.io-ensurenodebootstrap-sa ok
Nov 28 11:11:06 crc kubenswrapper[4923]: [+]poststarthook/project.openshift.io-projectcache ok
Nov 28 11:11:06 crc kubenswrapper[4923]: [+]poststarthook/project.openshift.io-projectauthorizationcache ok
Nov 28 11:11:06 crc kubenswrapper[4923]: [+]poststarthook/openshift.io-startinformers ok
Nov 28 11:11:06 crc kubenswrapper[4923]: [+]poststarthook/openshift.io-restmapperupdater ok
Nov 28 11:11:06 crc kubenswrapper[4923]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok
Nov 28 11:11:06 crc kubenswrapper[4923]: livez check failed
Nov 28 11:11:06 crc kubenswrapper[4923]: I1128 11:11:06.511816 4923 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-76f77b778f-lhbv8" podUID="37ddbfb0-c042-460d-b772-9cdd214a79a1" containerName="openshift-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 28 11:11:06 crc kubenswrapper[4923]: I1128 11:11:06.585492 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-2vsdg"
Nov 28 11:11:06 crc kubenswrapper[4923]: I1128 11:11:06.585714 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f9d7485db-2vsdg"
Nov 28 11:11:06 crc kubenswrapper[4923]: I1128 11:11:06.592040 4923 patch_prober.go:28] interesting pod/console-f9d7485db-2vsdg container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.13:8443/health\": dial tcp 10.217.0.13:8443: connect: connection refused" start-of-body=
Nov 28 11:11:06 crc kubenswrapper[4923]: I1128 11:11:06.592071 4923 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-2vsdg" podUID="aa97fc63-7e09-4217-9fb9-78fca4703f04" containerName="console" probeResult="failure" output="Get \"https://10.217.0.13:8443/health\": dial tcp 10.217.0.13:8443: connect: connection refused"
Nov 28 11:11:06 crc kubenswrapper[4923]: I1128 11:11:06.713289 4923 patch_prober.go:28] interesting pod/downloads-7954f5f757-fd2jt container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" start-of-body=
Nov 28 11:11:06 crc kubenswrapper[4923]: I1128 11:11:06.713326 4923 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-fd2jt" podUID="7638a2ec-d85c-456d-9d1b-9e56d83eae4b" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused"
Nov 28 11:11:06 crc kubenswrapper[4923]: I1128 11:11:06.718257 4923 patch_prober.go:28] interesting pod/downloads-7954f5f757-fd2jt container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" start-of-body=
Nov 28 11:11:06 crc kubenswrapper[4923]: I1128 11:11:06.718277 4923 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-fd2jt" podUID="7638a2ec-d85c-456d-9d1b-9e56d83eae4b" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused"
Nov 28 11:11:07 crc kubenswrapper[4923]: I1128 11:11:07.053546 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 11:11:07 crc kubenswrapper[4923]: I1128 11:11:07.053593 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 11:11:07 crc kubenswrapper[4923]: I1128 11:11:07.059916 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 11:11:07 crc kubenswrapper[4923]: I1128 11:11:07.102744 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 11:11:07 crc kubenswrapper[4923]: I1128 11:11:07.159638 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 11:11:07 crc kubenswrapper[4923]: I1128 11:11:07.163242 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 11:11:07 crc kubenswrapper[4923]: I1128 11:11:07.186090 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes"
Nov 28 11:11:07 crc kubenswrapper[4923]: I1128 11:11:07.198274 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 11:11:07 crc kubenswrapper[4923]: I1128 11:11:07.242653 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 11:11:07 crc kubenswrapper[4923]: I1128 11:11:07.261444 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 11:11:07 crc kubenswrapper[4923]: I1128 11:11:07.268651 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 11:11:07 crc kubenswrapper[4923]: I1128 11:11:07.407434 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-g855d" event={"ID":"6f176857-50d2-41c7-8237-961e330c629d","Type":"ContainerStarted","Data":"13393fcac0183330276fc39d59fadecad01aff0e6a754171801322d18b7b2b6d"}
Nov 28 11:11:07 crc kubenswrapper[4923]: I1128 11:11:07.501723 4923 patch_prober.go:28] interesting pod/router-default-5444994796-dfffg container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 28 11:11:07 crc kubenswrapper[4923]: [-]has-synced failed: reason withheld
Nov 28 11:11:07 crc kubenswrapper[4923]: [+]process-running ok
Nov 28 11:11:07 crc kubenswrapper[4923]: healthz check failed
Nov 28 11:11:07 crc kubenswrapper[4923]: I1128 11:11:07.501794 4923 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-dfffg" podUID="a49569a7-dda0-4856-816e-296642ddbdff" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 28 11:11:07 crc kubenswrapper[4923]: I1128 11:11:07.514990 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 11:11:07 crc kubenswrapper[4923]: I1128 11:11:07.809145 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-9x5dg" podStartSLOduration=15.809125993 podStartE2EDuration="15.809125993s" podCreationTimestamp="2025-11-28 11:10:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:11:06.40992665 +0000 UTC m=+145.538610860" watchObservedRunningTime="2025-11-28 11:11:07.809125993 +0000 UTC m=+146.937810203"
Nov 28 11:11:07 crc kubenswrapper[4923]: I1128 11:11:07.819082 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"]
Nov 28 11:11:07 crc kubenswrapper[4923]: E1128 11:11:07.819408 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ac7e8c4a-3957-4b3f-a6ce-968bb42f2a89" containerName="collect-profiles"
Nov 28 11:11:07 crc kubenswrapper[4923]: I1128 11:11:07.819420 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="ac7e8c4a-3957-4b3f-a6ce-968bb42f2a89" containerName="collect-profiles"
Nov 28 11:11:07 crc kubenswrapper[4923]: I1128 11:11:07.819550 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="ac7e8c4a-3957-4b3f-a6ce-968bb42f2a89" containerName="collect-profiles"
Nov 28 11:11:07 crc kubenswrapper[4923]: I1128 11:11:07.820160 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 28 11:11:07 crc kubenswrapper[4923]: I1128 11:11:07.821891 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n"
Nov 28 11:11:07 crc kubenswrapper[4923]: I1128 11:11:07.823856 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"]
Nov 28 11:11:07 crc kubenswrapper[4923]: I1128 11:11:07.827225 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt"
Nov 28 11:11:07 crc kubenswrapper[4923]: W1128 11:11:07.945264 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3b6479f0_333b_4a96_9adf_2099afdc2447.slice/crio-3dcc1ec90d8b3ecb20cd0db5503caec1bf8dc32dd20b76773c45f8f261202d54 WatchSource:0}: Error finding container 3dcc1ec90d8b3ecb20cd0db5503caec1bf8dc32dd20b76773c45f8f261202d54: Status 404 returned error can't find the container with id 3dcc1ec90d8b3ecb20cd0db5503caec1bf8dc32dd20b76773c45f8f261202d54
Nov 28 11:11:07 crc kubenswrapper[4923]: I1128 11:11:07.985297 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/302df168-378e-47cf-bd87-45db0be14689-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"302df168-378e-47cf-bd87-45db0be14689\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 28 11:11:07 crc kubenswrapper[4923]: I1128 11:11:07.985370 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/302df168-378e-47cf-bd87-45db0be14689-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"302df168-378e-47cf-bd87-45db0be14689\") "
pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 28 11:11:08 crc kubenswrapper[4923]: W1128 11:11:08.029365 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9d751cbb_f2e2_430d_9754_c882a5e924a5.slice/crio-618a25a58508aeca80d212bf6f3462edd57c6ff6a104b30b04bbd60994f96f57 WatchSource:0}: Error finding container 618a25a58508aeca80d212bf6f3462edd57c6ff6a104b30b04bbd60994f96f57: Status 404 returned error can't find the container with id 618a25a58508aeca80d212bf6f3462edd57c6ff6a104b30b04bbd60994f96f57 Nov 28 11:11:08 crc kubenswrapper[4923]: I1128 11:11:08.087518 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/302df168-378e-47cf-bd87-45db0be14689-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"302df168-378e-47cf-bd87-45db0be14689\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 28 11:11:08 crc kubenswrapper[4923]: I1128 11:11:08.087776 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/302df168-378e-47cf-bd87-45db0be14689-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"302df168-378e-47cf-bd87-45db0be14689\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 28 11:11:08 crc kubenswrapper[4923]: I1128 11:11:08.087893 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/302df168-378e-47cf-bd87-45db0be14689-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"302df168-378e-47cf-bd87-45db0be14689\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 28 11:11:08 crc kubenswrapper[4923]: I1128 11:11:08.134815 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/302df168-378e-47cf-bd87-45db0be14689-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"302df168-378e-47cf-bd87-45db0be14689\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 28 11:11:08 crc kubenswrapper[4923]: I1128 11:11:08.143215 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 28 11:11:08 crc kubenswrapper[4923]: I1128 11:11:08.441545 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"80b93f3902a1d193728eab3c33ecbeefeee89259fc8b8e96db1c751fd1be974f"} Nov 28 11:11:08 crc kubenswrapper[4923]: I1128 11:11:08.458176 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-g855d" event={"ID":"6f176857-50d2-41c7-8237-961e330c629d","Type":"ContainerStarted","Data":"b5b5789b67b9671ae9e00dcfa57e90c7fa0e451e97bfde57dbba3c68139d4ec3"} Nov 28 11:11:08 crc kubenswrapper[4923]: I1128 11:11:08.459294 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-g855d" Nov 28 11:11:08 crc kubenswrapper[4923]: I1128 11:11:08.472509 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"618a25a58508aeca80d212bf6f3462edd57c6ff6a104b30b04bbd60994f96f57"} Nov 28 11:11:08 crc kubenswrapper[4923]: I1128 11:11:08.476300 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"3dcc1ec90d8b3ecb20cd0db5503caec1bf8dc32dd20b76773c45f8f261202d54"} Nov 28 11:11:08 crc kubenswrapper[4923]: I1128 11:11:08.481624 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-697d97f7c8-g855d" podStartSLOduration=128.481610397 podStartE2EDuration="2m8.481610397s" podCreationTimestamp="2025-11-28 11:09:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:11:08.480482565 +0000 UTC m=+147.609166775" watchObservedRunningTime="2025-11-28 11:11:08.481610397 +0000 UTC m=+147.610294607" Nov 28 11:11:08 crc kubenswrapper[4923]: I1128 11:11:08.497882 4923 patch_prober.go:28] interesting pod/router-default-5444994796-dfffg container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 28 11:11:08 crc kubenswrapper[4923]: [-]has-synced failed: reason withheld Nov 28 11:11:08 crc kubenswrapper[4923]: [+]process-running ok Nov 28 11:11:08 crc kubenswrapper[4923]: healthz check failed Nov 28 11:11:08 crc kubenswrapper[4923]: I1128 11:11:08.498006 4923 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-dfffg" podUID="a49569a7-dda0-4856-816e-296642ddbdff" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 11:11:08 crc kubenswrapper[4923]: I1128 11:11:08.915242 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 28 11:11:09 crc kubenswrapper[4923]: I1128 11:11:09.351632 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 28 11:11:09 crc kubenswrapper[4923]: I1128 11:11:09.352895 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 28 11:11:09 crc kubenswrapper[4923]: I1128 11:11:09.355136 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Nov 28 11:11:09 crc kubenswrapper[4923]: I1128 11:11:09.355143 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Nov 28 11:11:09 crc kubenswrapper[4923]: I1128 11:11:09.364378 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 28 11:11:09 crc kubenswrapper[4923]: I1128 11:11:09.419867 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/bc5adbb2-17e1-41da-9041-0ce037b85424-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"bc5adbb2-17e1-41da-9041-0ce037b85424\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 28 11:11:09 crc kubenswrapper[4923]: I1128 11:11:09.419992 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/bc5adbb2-17e1-41da-9041-0ce037b85424-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"bc5adbb2-17e1-41da-9041-0ce037b85424\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 28 11:11:09 crc kubenswrapper[4923]: I1128 11:11:09.484586 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"302df168-378e-47cf-bd87-45db0be14689","Type":"ContainerStarted","Data":"b1a92da37db2c4ab603f551330da9d50668ae11541126ac33d6eb8daaa82586c"} Nov 28 11:11:09 crc kubenswrapper[4923]: I1128 11:11:09.492865 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"99fdd10d37239d596908e4275698c0649e68aae5e702a092d316d1dd15e86fee"} Nov 28 11:11:09 crc kubenswrapper[4923]: I1128 11:11:09.497487 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"118abd4e78eff70dea2e2806e8c66b60a14abcc2289a97a20be6bba2dc3ccc60"} Nov 28 11:11:09 crc kubenswrapper[4923]: I1128 11:11:09.497558 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 11:11:09 crc kubenswrapper[4923]: I1128 11:11:09.501892 4923 patch_prober.go:28] interesting pod/router-default-5444994796-dfffg container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 28 11:11:09 crc kubenswrapper[4923]: [-]has-synced failed: reason withheld Nov 28 11:11:09 crc kubenswrapper[4923]: [+]process-running ok Nov 28 11:11:09 crc kubenswrapper[4923]: healthz check failed Nov 28 11:11:09 crc kubenswrapper[4923]: I1128 11:11:09.501959 4923 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-dfffg" podUID="a49569a7-dda0-4856-816e-296642ddbdff" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 11:11:09 crc kubenswrapper[4923]: I1128 11:11:09.518504 4923 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"8c068da72e4d08a90a6283246b062a8513165c77f2c40ea527e8507eff8e4e65"} Nov 28 11:11:09 crc kubenswrapper[4923]: I1128 11:11:09.520647 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/bc5adbb2-17e1-41da-9041-0ce037b85424-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"bc5adbb2-17e1-41da-9041-0ce037b85424\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 28 11:11:09 crc kubenswrapper[4923]: I1128 11:11:09.520689 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/bc5adbb2-17e1-41da-9041-0ce037b85424-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"bc5adbb2-17e1-41da-9041-0ce037b85424\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 28 11:11:09 crc kubenswrapper[4923]: I1128 11:11:09.521206 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/bc5adbb2-17e1-41da-9041-0ce037b85424-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"bc5adbb2-17e1-41da-9041-0ce037b85424\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 28 11:11:09 crc kubenswrapper[4923]: I1128 11:11:09.565580 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/bc5adbb2-17e1-41da-9041-0ce037b85424-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"bc5adbb2-17e1-41da-9041-0ce037b85424\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 28 11:11:09 crc kubenswrapper[4923]: I1128 11:11:09.734676 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 28 11:11:10 crc kubenswrapper[4923]: I1128 11:11:10.240547 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 28 11:11:10 crc kubenswrapper[4923]: I1128 11:11:10.496130 4923 patch_prober.go:28] interesting pod/router-default-5444994796-dfffg container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 28 11:11:10 crc kubenswrapper[4923]: [-]has-synced failed: reason withheld Nov 28 11:11:10 crc kubenswrapper[4923]: [+]process-running ok Nov 28 11:11:10 crc kubenswrapper[4923]: healthz check failed Nov 28 11:11:10 crc kubenswrapper[4923]: I1128 11:11:10.496192 4923 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-dfffg" podUID="a49569a7-dda0-4856-816e-296642ddbdff" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 11:11:10 crc kubenswrapper[4923]: I1128 11:11:10.543610 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"bc5adbb2-17e1-41da-9041-0ce037b85424","Type":"ContainerStarted","Data":"a323a456e27661db3be43ca4fb911250e1e8336b5b0b88da1c13fc5e70daf623"} Nov 28 11:11:11 crc kubenswrapper[4923]: I1128 11:11:11.495405 4923 patch_prober.go:28] interesting pod/router-default-5444994796-dfffg container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 28 11:11:11 crc kubenswrapper[4923]: [-]has-synced failed: reason withheld Nov 28 11:11:11 crc kubenswrapper[4923]: [+]process-running ok Nov 28 11:11:11 crc kubenswrapper[4923]: healthz check failed Nov 28 11:11:11 crc kubenswrapper[4923]: I1128 11:11:11.495471 4923 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-dfffg" podUID="a49569a7-dda0-4856-816e-296642ddbdff" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 11:11:11 crc kubenswrapper[4923]: I1128 11:11:11.507961 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-lhbv8" Nov 28 11:11:11 crc kubenswrapper[4923]: I1128 11:11:11.520483 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-lhbv8" Nov 28 11:11:11 crc kubenswrapper[4923]: I1128 11:11:11.614581 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"bc5adbb2-17e1-41da-9041-0ce037b85424","Type":"ContainerStarted","Data":"5cdcc2a1664c9c70def600ba8b25a43da39bf579f5cb7f78eccb933b6c0cbc11"} Nov 28 11:11:11 crc kubenswrapper[4923]: I1128 11:11:11.619157 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"302df168-378e-47cf-bd87-45db0be14689","Type":"ContainerStarted","Data":"90406707a42ddb9b0386ab9a03dcd3ddb8fb012ababac387dbcaf969fb6fc70a"} Nov 28 11:11:11 crc kubenswrapper[4923]: I1128 11:11:11.662823 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-8-crc" podStartSLOduration=2.662806756 podStartE2EDuration="2.662806756s" 
podCreationTimestamp="2025-11-28 11:11:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:11:11.652517045 +0000 UTC m=+150.781201255" watchObservedRunningTime="2025-11-28 11:11:11.662806756 +0000 UTC m=+150.791490966" Nov 28 11:11:11 crc kubenswrapper[4923]: I1128 11:11:11.666441 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-m4cv2" Nov 28 11:11:11 crc kubenswrapper[4923]: I1128 11:11:11.694951 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/revision-pruner-9-crc" podStartSLOduration=4.694923634 podStartE2EDuration="4.694923634s" podCreationTimestamp="2025-11-28 11:11:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:11:11.67142206 +0000 UTC m=+150.800106270" watchObservedRunningTime="2025-11-28 11:11:11.694923634 +0000 UTC m=+150.823607844" Nov 28 11:11:12 crc kubenswrapper[4923]: I1128 11:11:12.497858 4923 patch_prober.go:28] interesting pod/router-default-5444994796-dfffg container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 28 11:11:12 crc kubenswrapper[4923]: [-]has-synced failed: reason withheld Nov 28 11:11:12 crc kubenswrapper[4923]: [+]process-running ok Nov 28 11:11:12 crc kubenswrapper[4923]: healthz check failed Nov 28 11:11:12 crc kubenswrapper[4923]: I1128 11:11:12.498182 4923 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-dfffg" podUID="a49569a7-dda0-4856-816e-296642ddbdff" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 11:11:12 crc kubenswrapper[4923]: E1128 11:11:12.560802 4923 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-podbc5adbb2_17e1_41da_9041_0ce037b85424.slice/crio-conmon-5cdcc2a1664c9c70def600ba8b25a43da39bf579f5cb7f78eccb933b6c0cbc11.scope\": RecentStats: unable to find data in memory cache]" Nov 28 11:11:12 crc kubenswrapper[4923]: I1128 11:11:12.671361 4923 generic.go:334] "Generic (PLEG): container finished" podID="302df168-378e-47cf-bd87-45db0be14689" containerID="90406707a42ddb9b0386ab9a03dcd3ddb8fb012ababac387dbcaf969fb6fc70a" exitCode=0 Nov 28 11:11:12 crc kubenswrapper[4923]: I1128 11:11:12.671450 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"302df168-378e-47cf-bd87-45db0be14689","Type":"ContainerDied","Data":"90406707a42ddb9b0386ab9a03dcd3ddb8fb012ababac387dbcaf969fb6fc70a"} Nov 28 11:11:12 crc kubenswrapper[4923]: I1128 11:11:12.690585 4923 generic.go:334] "Generic (PLEG): container finished" podID="bc5adbb2-17e1-41da-9041-0ce037b85424" containerID="5cdcc2a1664c9c70def600ba8b25a43da39bf579f5cb7f78eccb933b6c0cbc11" exitCode=0 Nov 28 11:11:12 crc kubenswrapper[4923]: I1128 11:11:12.690630 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"bc5adbb2-17e1-41da-9041-0ce037b85424","Type":"ContainerDied","Data":"5cdcc2a1664c9c70def600ba8b25a43da39bf579f5cb7f78eccb933b6c0cbc11"} Nov 28 11:11:13 crc kubenswrapper[4923]: I1128 11:11:13.496246 4923 patch_prober.go:28] interesting 
pod/router-default-5444994796-dfffg container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 28 11:11:13 crc kubenswrapper[4923]: [-]has-synced failed: reason withheld Nov 28 11:11:13 crc kubenswrapper[4923]: [+]process-running ok Nov 28 11:11:13 crc kubenswrapper[4923]: healthz check failed Nov 28 11:11:13 crc kubenswrapper[4923]: I1128 11:11:13.496298 4923 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-dfffg" podUID="a49569a7-dda0-4856-816e-296642ddbdff" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 11:11:14 crc kubenswrapper[4923]: I1128 11:11:14.026539 4923 patch_prober.go:28] interesting pod/machine-config-daemon-bwdth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 11:11:14 crc kubenswrapper[4923]: I1128 11:11:14.026786 4923 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 11:11:14 crc kubenswrapper[4923]: I1128 11:11:14.495698 4923 patch_prober.go:28] interesting pod/router-default-5444994796-dfffg container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 28 11:11:14 crc kubenswrapper[4923]: [-]has-synced failed: reason withheld Nov 28 11:11:14 crc kubenswrapper[4923]: [+]process-running ok Nov 28 11:11:14 crc kubenswrapper[4923]: healthz check failed Nov 28 11:11:14 crc kubenswrapper[4923]: I1128 11:11:14.495746 4923 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-dfffg" podUID="a49569a7-dda0-4856-816e-296642ddbdff" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 11:11:15 crc kubenswrapper[4923]: I1128 11:11:15.495610 4923 patch_prober.go:28] interesting pod/router-default-5444994796-dfffg container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 28 11:11:15 crc kubenswrapper[4923]: [-]has-synced failed: reason withheld Nov 28 11:11:15 crc kubenswrapper[4923]: [+]process-running ok Nov 28 11:11:15 crc kubenswrapper[4923]: healthz check failed Nov 28 11:11:15 crc kubenswrapper[4923]: I1128 11:11:15.495654 4923 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-dfffg" podUID="a49569a7-dda0-4856-816e-296642ddbdff" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 11:11:15 crc kubenswrapper[4923]: I1128 11:11:15.544890 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" Nov 28 11:11:16 crc kubenswrapper[4923]: I1128 11:11:16.495858 4923 patch_prober.go:28] interesting pod/router-default-5444994796-dfffg container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe 
failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 28 11:11:16 crc kubenswrapper[4923]: [-]has-synced failed: reason withheld Nov 28 11:11:16 crc kubenswrapper[4923]: [+]process-running ok Nov 28 11:11:16 crc kubenswrapper[4923]: healthz check failed Nov 28 11:11:16 crc kubenswrapper[4923]: I1128 11:11:16.496133 4923 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-dfffg" podUID="a49569a7-dda0-4856-816e-296642ddbdff" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 11:11:16 crc kubenswrapper[4923]: I1128 11:11:16.585205 4923 patch_prober.go:28] interesting pod/console-f9d7485db-2vsdg container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.13:8443/health\": dial tcp 10.217.0.13:8443: connect: connection refused" start-of-body= Nov 28 11:11:16 crc kubenswrapper[4923]: I1128 11:11:16.585260 4923 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-2vsdg" podUID="aa97fc63-7e09-4217-9fb9-78fca4703f04" containerName="console" probeResult="failure" output="Get \"https://10.217.0.13:8443/health\": dial tcp 10.217.0.13:8443: connect: connection refused" Nov 28 11:11:16 crc kubenswrapper[4923]: I1128 11:11:16.717354 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-fd2jt" Nov 28 11:11:17 crc kubenswrapper[4923]: I1128 11:11:17.497146 4923 patch_prober.go:28] interesting pod/router-default-5444994796-dfffg container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 28 11:11:17 crc kubenswrapper[4923]: [-]has-synced failed: reason withheld Nov 28 11:11:17 crc kubenswrapper[4923]: [+]process-running ok Nov 28 11:11:17 crc kubenswrapper[4923]: healthz check failed Nov 28 11:11:17 crc kubenswrapper[4923]: I1128 11:11:17.497198 4923 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-dfffg" podUID="a49569a7-dda0-4856-816e-296642ddbdff" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 11:11:18 crc kubenswrapper[4923]: I1128 11:11:18.495286 4923 patch_prober.go:28] interesting pod/router-default-5444994796-dfffg container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 28 11:11:18 crc kubenswrapper[4923]: [-]has-synced failed: reason withheld Nov 28 11:11:18 crc kubenswrapper[4923]: [+]process-running ok Nov 28 11:11:18 crc kubenswrapper[4923]: healthz check failed Nov 28 11:11:18 crc kubenswrapper[4923]: I1128 11:11:18.495348 4923 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-dfffg" podUID="a49569a7-dda0-4856-816e-296642ddbdff" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 11:11:19 crc kubenswrapper[4923]: I1128 11:11:19.496982 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-dfffg" Nov 28 11:11:19 crc kubenswrapper[4923]: I1128 11:11:19.504965 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-dfffg" Nov 28 11:11:22 crc 
kubenswrapper[4923]: I1128 11:11:22.857906 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b483d037-b692-45d5-bb83-02e029649100-metrics-certs\") pod \"network-metrics-daemon-g2kmb\" (UID: \"b483d037-b692-45d5-bb83-02e029649100\") " pod="openshift-multus/network-metrics-daemon-g2kmb" Nov 28 11:11:22 crc kubenswrapper[4923]: I1128 11:11:22.862432 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b483d037-b692-45d5-bb83-02e029649100-metrics-certs\") pod \"network-metrics-daemon-g2kmb\" (UID: \"b483d037-b692-45d5-bb83-02e029649100\") " pod="openshift-multus/network-metrics-daemon-g2kmb" Nov 28 11:11:23 crc kubenswrapper[4923]: I1128 11:11:23.131588 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-g2kmb" Nov 28 11:11:25 crc kubenswrapper[4923]: I1128 11:11:25.474566 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-g855d" Nov 28 11:11:26 crc kubenswrapper[4923]: I1128 11:11:26.590856 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-2vsdg" Nov 28 11:11:26 crc kubenswrapper[4923]: I1128 11:11:26.596148 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-2vsdg" Nov 28 11:11:36 crc kubenswrapper[4923]: I1128 11:11:36.617903 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-cnw7p" Nov 28 11:11:40 crc kubenswrapper[4923]: I1128 11:11:40.082321 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 28 11:11:40 crc kubenswrapper[4923]: I1128 11:11:40.089511 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 28 11:11:40 crc kubenswrapper[4923]: I1128 11:11:40.200973 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/bc5adbb2-17e1-41da-9041-0ce037b85424-kube-api-access\") pod \"bc5adbb2-17e1-41da-9041-0ce037b85424\" (UID: \"bc5adbb2-17e1-41da-9041-0ce037b85424\") " Nov 28 11:11:40 crc kubenswrapper[4923]: I1128 11:11:40.201222 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/302df168-378e-47cf-bd87-45db0be14689-kubelet-dir\") pod \"302df168-378e-47cf-bd87-45db0be14689\" (UID: \"302df168-378e-47cf-bd87-45db0be14689\") " Nov 28 11:11:40 crc kubenswrapper[4923]: I1128 11:11:40.201286 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/bc5adbb2-17e1-41da-9041-0ce037b85424-kubelet-dir\") pod \"bc5adbb2-17e1-41da-9041-0ce037b85424\" (UID: \"bc5adbb2-17e1-41da-9041-0ce037b85424\") " Nov 28 11:11:40 crc kubenswrapper[4923]: I1128 11:11:40.201352 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/302df168-378e-47cf-bd87-45db0be14689-kube-api-access\") pod \"302df168-378e-47cf-bd87-45db0be14689\" (UID: \"302df168-378e-47cf-bd87-45db0be14689\") " Nov 28 11:11:40 crc kubenswrapper[4923]: I1128 11:11:40.201407 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/302df168-378e-47cf-bd87-45db0be14689-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "302df168-378e-47cf-bd87-45db0be14689" (UID: "302df168-378e-47cf-bd87-45db0be14689"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 11:11:40 crc kubenswrapper[4923]: I1128 11:11:40.201462 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/bc5adbb2-17e1-41da-9041-0ce037b85424-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "bc5adbb2-17e1-41da-9041-0ce037b85424" (UID: "bc5adbb2-17e1-41da-9041-0ce037b85424"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 11:11:40 crc kubenswrapper[4923]: I1128 11:11:40.201812 4923 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/302df168-378e-47cf-bd87-45db0be14689-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 28 11:11:40 crc kubenswrapper[4923]: I1128 11:11:40.201839 4923 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/bc5adbb2-17e1-41da-9041-0ce037b85424-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 28 11:11:40 crc kubenswrapper[4923]: I1128 11:11:40.208574 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5adbb2-17e1-41da-9041-0ce037b85424-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "bc5adbb2-17e1-41da-9041-0ce037b85424" (UID: "bc5adbb2-17e1-41da-9041-0ce037b85424"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:11:40 crc kubenswrapper[4923]: I1128 11:11:40.217112 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/302df168-378e-47cf-bd87-45db0be14689-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "302df168-378e-47cf-bd87-45db0be14689" (UID: "302df168-378e-47cf-bd87-45db0be14689"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:11:40 crc kubenswrapper[4923]: I1128 11:11:40.302623 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/bc5adbb2-17e1-41da-9041-0ce037b85424-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 28 11:11:40 crc kubenswrapper[4923]: I1128 11:11:40.302652 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/302df168-378e-47cf-bd87-45db0be14689-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 28 11:11:40 crc kubenswrapper[4923]: I1128 11:11:40.860046 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"302df168-378e-47cf-bd87-45db0be14689","Type":"ContainerDied","Data":"b1a92da37db2c4ab603f551330da9d50668ae11541126ac33d6eb8daaa82586c"} Nov 28 11:11:40 crc kubenswrapper[4923]: I1128 11:11:40.860103 4923 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b1a92da37db2c4ab603f551330da9d50668ae11541126ac33d6eb8daaa82586c" Nov 28 11:11:40 crc kubenswrapper[4923]: I1128 11:11:40.860058 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 28 11:11:40 crc kubenswrapper[4923]: I1128 11:11:40.861413 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"bc5adbb2-17e1-41da-9041-0ce037b85424","Type":"ContainerDied","Data":"a323a456e27661db3be43ca4fb911250e1e8336b5b0b88da1c13fc5e70daf623"} Nov 28 11:11:40 crc kubenswrapper[4923]: I1128 11:11:40.861437 4923 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a323a456e27661db3be43ca4fb911250e1e8336b5b0b88da1c13fc5e70daf623" Nov 28 11:11:40 crc kubenswrapper[4923]: I1128 11:11:40.861467 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 28 11:11:43 crc kubenswrapper[4923]: I1128 11:11:43.188664 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Nov 28 11:11:43 crc kubenswrapper[4923]: E1128 11:11:43.189882 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="302df168-378e-47cf-bd87-45db0be14689" containerName="pruner" Nov 28 11:11:43 crc kubenswrapper[4923]: I1128 11:11:43.189905 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="302df168-378e-47cf-bd87-45db0be14689" containerName="pruner" Nov 28 11:11:43 crc kubenswrapper[4923]: E1128 11:11:43.190169 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bc5adbb2-17e1-41da-9041-0ce037b85424" containerName="pruner" Nov 28 11:11:43 crc kubenswrapper[4923]: I1128 11:11:43.190184 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="bc5adbb2-17e1-41da-9041-0ce037b85424" containerName="pruner" Nov 28 11:11:43 crc kubenswrapper[4923]: I1128 11:11:43.190516 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="bc5adbb2-17e1-41da-9041-0ce037b85424" containerName="pruner" Nov 28 11:11:43 crc kubenswrapper[4923]: I1128 11:11:43.190539 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="302df168-378e-47cf-bd87-45db0be14689" containerName="pruner" Nov 28 11:11:43 crc kubenswrapper[4923]: I1128 11:11:43.192459 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 28 11:11:43 crc kubenswrapper[4923]: I1128 11:11:43.193818 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Nov 28 11:11:43 crc kubenswrapper[4923]: I1128 11:11:43.196476 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Nov 28 11:11:43 crc kubenswrapper[4923]: I1128 11:11:43.197416 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Nov 28 11:11:43 crc kubenswrapper[4923]: I1128 11:11:43.356777 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/15d2531b-dca2-400b-8a64-63a946c97693-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"15d2531b-dca2-400b-8a64-63a946c97693\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 28 11:11:43 crc kubenswrapper[4923]: I1128 11:11:43.356922 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/15d2531b-dca2-400b-8a64-63a946c97693-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"15d2531b-dca2-400b-8a64-63a946c97693\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 28 11:11:43 crc kubenswrapper[4923]: I1128 11:11:43.458289 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/15d2531b-dca2-400b-8a64-63a946c97693-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"15d2531b-dca2-400b-8a64-63a946c97693\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 28 11:11:43 crc kubenswrapper[4923]: I1128 11:11:43.458386 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: 
\"kubernetes.io/host-path/15d2531b-dca2-400b-8a64-63a946c97693-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"15d2531b-dca2-400b-8a64-63a946c97693\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 28 11:11:43 crc kubenswrapper[4923]: I1128 11:11:43.458475 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/15d2531b-dca2-400b-8a64-63a946c97693-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"15d2531b-dca2-400b-8a64-63a946c97693\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 28 11:11:43 crc kubenswrapper[4923]: I1128 11:11:43.495654 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/15d2531b-dca2-400b-8a64-63a946c97693-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"15d2531b-dca2-400b-8a64-63a946c97693\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 28 11:11:43 crc kubenswrapper[4923]: I1128 11:11:43.548905 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 28 11:11:44 crc kubenswrapper[4923]: I1128 11:11:44.026859 4923 patch_prober.go:28] interesting pod/machine-config-daemon-bwdth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 11:11:44 crc kubenswrapper[4923]: I1128 11:11:44.027002 4923 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 11:11:45 crc kubenswrapper[4923]: E1128 11:11:45.614851 4923 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Nov 28 11:11:45 crc kubenswrapper[4923]: E1128 11:11:45.615468 4923 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-q99hs,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-fv729_openshift-marketplace(2db4e8e9-8919-4b13-afef-835484cc865a): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 28 11:11:45 crc kubenswrapper[4923]: E1128 11:11:45.617348 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-fv729" podUID="2db4e8e9-8919-4b13-afef-835484cc865a" Nov 28 11:11:47 crc kubenswrapper[4923]: I1128 11:11:47.246831 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 11:11:47 crc kubenswrapper[4923]: I1128 11:11:47.353180 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Nov 28 11:11:47 crc kubenswrapper[4923]: I1128 11:11:47.354652 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 28 11:11:47 crc kubenswrapper[4923]: I1128 11:11:47.362414 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Nov 28 11:11:47 crc kubenswrapper[4923]: I1128 11:11:47.514256 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ffd50eaf-eb37-40b2-80b0-a7f71f7371f8-kube-api-access\") pod \"installer-9-crc\" (UID: \"ffd50eaf-eb37-40b2-80b0-a7f71f7371f8\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 28 11:11:47 crc kubenswrapper[4923]: I1128 11:11:47.514310 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/ffd50eaf-eb37-40b2-80b0-a7f71f7371f8-var-lock\") pod \"installer-9-crc\" (UID: \"ffd50eaf-eb37-40b2-80b0-a7f71f7371f8\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 28 11:11:47 crc kubenswrapper[4923]: I1128 11:11:47.514379 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/ffd50eaf-eb37-40b2-80b0-a7f71f7371f8-kubelet-dir\") pod \"installer-9-crc\" (UID: \"ffd50eaf-eb37-40b2-80b0-a7f71f7371f8\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 28 11:11:47 crc kubenswrapper[4923]: I1128 11:11:47.615434 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/ffd50eaf-eb37-40b2-80b0-a7f71f7371f8-kubelet-dir\") pod \"installer-9-crc\" (UID: \"ffd50eaf-eb37-40b2-80b0-a7f71f7371f8\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 28 11:11:47 crc kubenswrapper[4923]: I1128 11:11:47.615509 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ffd50eaf-eb37-40b2-80b0-a7f71f7371f8-kube-api-access\") pod \"installer-9-crc\" (UID: \"ffd50eaf-eb37-40b2-80b0-a7f71f7371f8\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 28 11:11:47 crc kubenswrapper[4923]: I1128 11:11:47.615536 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/ffd50eaf-eb37-40b2-80b0-a7f71f7371f8-var-lock\") pod \"installer-9-crc\" (UID: \"ffd50eaf-eb37-40b2-80b0-a7f71f7371f8\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 28 11:11:47 crc kubenswrapper[4923]: I1128 11:11:47.615611 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/ffd50eaf-eb37-40b2-80b0-a7f71f7371f8-var-lock\") pod \"installer-9-crc\" (UID: \"ffd50eaf-eb37-40b2-80b0-a7f71f7371f8\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 28 11:11:47 crc kubenswrapper[4923]: I1128 11:11:47.615645 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/ffd50eaf-eb37-40b2-80b0-a7f71f7371f8-kubelet-dir\") pod \"installer-9-crc\" (UID: \"ffd50eaf-eb37-40b2-80b0-a7f71f7371f8\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 28 11:11:47 crc kubenswrapper[4923]: I1128 11:11:47.632688 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ffd50eaf-eb37-40b2-80b0-a7f71f7371f8-kube-api-access\") pod \"installer-9-crc\" (UID: 
\"ffd50eaf-eb37-40b2-80b0-a7f71f7371f8\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 28 11:11:47 crc kubenswrapper[4923]: I1128 11:11:47.672758 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 28 11:11:48 crc kubenswrapper[4923]: E1128 11:11:48.957175 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-fv729" podUID="2db4e8e9-8919-4b13-afef-835484cc865a" Nov 28 11:11:49 crc kubenswrapper[4923]: E1128 11:11:49.062457 4923 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Nov 28 11:11:49 crc kubenswrapper[4923]: E1128 11:11:49.062687 4923 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-wsdls,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-qkkh6_openshift-marketplace(39eebecc-004d-445a-ac63-fad7bc311127): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 28 11:11:49 crc kubenswrapper[4923]: E1128 11:11:49.064163 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-qkkh6" podUID="39eebecc-004d-445a-ac63-fad7bc311127" Nov 28 11:11:49 crc kubenswrapper[4923]: E1128 11:11:49.064401 4923 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" 
image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Nov 28 11:11:49 crc kubenswrapper[4923]: E1128 11:11:49.064550 4923 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-v77lg,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-h4kzb_openshift-marketplace(4fa3aef4-051e-48bb-91a3-bd8bd4c2befb): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 28 11:11:49 crc kubenswrapper[4923]: E1128 11:11:49.065685 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-h4kzb" podUID="4fa3aef4-051e-48bb-91a3-bd8bd4c2befb" Nov 28 11:11:50 crc kubenswrapper[4923]: E1128 11:11:50.456241 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-qkkh6" podUID="39eebecc-004d-445a-ac63-fad7bc311127" Nov 28 11:11:50 crc kubenswrapper[4923]: E1128 11:11:50.456257 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-h4kzb" podUID="4fa3aef4-051e-48bb-91a3-bd8bd4c2befb" Nov 28 11:11:50 crc kubenswrapper[4923]: E1128 11:11:50.522098 4923 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Nov 28 11:11:50 crc kubenswrapper[4923]: E1128 11:11:50.522249 4923 kuberuntime_manager.go:1274] 
"Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-7f9kp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-js2pf_openshift-marketplace(f54a8053-81cc-429f-b68e-87a3fd245263): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 28 11:11:50 crc kubenswrapper[4923]: E1128 11:11:50.523425 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-js2pf" podUID="f54a8053-81cc-429f-b68e-87a3fd245263" Nov 28 11:11:50 crc kubenswrapper[4923]: E1128 11:11:50.537560 4923 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Nov 28 11:11:50 crc kubenswrapper[4923]: E1128 11:11:50.537747 4923 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-9dd64,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-n2qp8_openshift-marketplace(a6908a38-5adb-40c3-85e8-730eaa4937ef): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 28 11:11:50 crc kubenswrapper[4923]: E1128 11:11:50.538909 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-n2qp8" podUID="a6908a38-5adb-40c3-85e8-730eaa4937ef" Nov 28 11:11:52 crc kubenswrapper[4923]: E1128 11:11:52.263871 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-n2qp8" podUID="a6908a38-5adb-40c3-85e8-730eaa4937ef" Nov 28 11:11:52 crc kubenswrapper[4923]: E1128 11:11:52.264153 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-js2pf" podUID="f54a8053-81cc-429f-b68e-87a3fd245263" Nov 28 11:11:52 crc kubenswrapper[4923]: E1128 11:11:52.350905 4923 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Nov 28 11:11:52 crc kubenswrapper[4923]: E1128 11:11:52.351180 4923 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-zlhhb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-46sx6_openshift-marketplace(cffefe1d-9522-408d-aadf-c688411908e1): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 28 11:11:52 crc kubenswrapper[4923]: E1128 11:11:52.353177 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-46sx6" podUID="cffefe1d-9522-408d-aadf-c688411908e1" Nov 28 11:11:52 crc kubenswrapper[4923]: E1128 11:11:52.378797 4923 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Nov 28 11:11:52 crc kubenswrapper[4923]: E1128 11:11:52.378923 4923 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-vrb2k,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-tdzqr_openshift-marketplace(e28cae41-ead3-4395-a457-3077c92068ca): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 28 11:11:52 crc kubenswrapper[4923]: E1128 11:11:52.381355 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-tdzqr" podUID="e28cae41-ead3-4395-a457-3077c92068ca" Nov 28 11:11:52 crc kubenswrapper[4923]: E1128 11:11:52.412961 4923 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Nov 28 11:11:52 crc kubenswrapper[4923]: E1128 11:11:52.413140 4923 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-k2bjs,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-l4xf8_openshift-marketplace(84ce6a6f-d3d1-4ef9-8ca5-79dfa714a2b4): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 28 11:11:52 crc kubenswrapper[4923]: E1128 11:11:52.414303 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-l4xf8" podUID="84ce6a6f-d3d1-4ef9-8ca5-79dfa714a2b4" Nov 28 11:11:52 crc kubenswrapper[4923]: I1128 11:11:52.687360 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-g2kmb"] Nov 28 11:11:52 crc kubenswrapper[4923]: I1128 11:11:52.735426 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Nov 28 11:11:52 crc kubenswrapper[4923]: W1128 11:11:52.760209 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod15d2531b_dca2_400b_8a64_63a946c97693.slice/crio-af4418032727ef56a101a4d7167b99ff0dfd0e72babd942ff1afee7d58566a7b WatchSource:0}: Error finding container af4418032727ef56a101a4d7167b99ff0dfd0e72babd942ff1afee7d58566a7b: Status 404 returned error can't find the container with id af4418032727ef56a101a4d7167b99ff0dfd0e72babd942ff1afee7d58566a7b Nov 28 11:11:52 crc kubenswrapper[4923]: I1128 11:11:52.794911 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Nov 28 11:11:52 crc kubenswrapper[4923]: I1128 11:11:52.930404 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-g2kmb" event={"ID":"b483d037-b692-45d5-bb83-02e029649100","Type":"ContainerStarted","Data":"39cf8bac27af8c73eda8baf66176a21812573842545cdea90b813181ca2614c6"} Nov 28 11:11:52 crc kubenswrapper[4923]: I1128 11:11:52.932571 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" 
event={"ID":"ffd50eaf-eb37-40b2-80b0-a7f71f7371f8","Type":"ContainerStarted","Data":"a09f4bcb60f6eac6a172225cb1a72c3b636a1e72bdeebf8de459f2919c30a331"} Nov 28 11:11:52 crc kubenswrapper[4923]: I1128 11:11:52.934740 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"15d2531b-dca2-400b-8a64-63a946c97693","Type":"ContainerStarted","Data":"af4418032727ef56a101a4d7167b99ff0dfd0e72babd942ff1afee7d58566a7b"} Nov 28 11:11:52 crc kubenswrapper[4923]: E1128 11:11:52.935776 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-l4xf8" podUID="84ce6a6f-d3d1-4ef9-8ca5-79dfa714a2b4" Nov 28 11:11:52 crc kubenswrapper[4923]: E1128 11:11:52.935955 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-tdzqr" podUID="e28cae41-ead3-4395-a457-3077c92068ca" Nov 28 11:11:52 crc kubenswrapper[4923]: E1128 11:11:52.936030 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-46sx6" podUID="cffefe1d-9522-408d-aadf-c688411908e1" Nov 28 11:11:53 crc kubenswrapper[4923]: I1128 11:11:53.942192 4923 generic.go:334] "Generic (PLEG): container finished" podID="15d2531b-dca2-400b-8a64-63a946c97693" containerID="8a27fd46544886550349c7f777141b7b62405c8ef2c6fae110768aca3a9ce942" exitCode=0 Nov 28 11:11:53 crc kubenswrapper[4923]: I1128 11:11:53.944359 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"15d2531b-dca2-400b-8a64-63a946c97693","Type":"ContainerDied","Data":"8a27fd46544886550349c7f777141b7b62405c8ef2c6fae110768aca3a9ce942"} Nov 28 11:11:53 crc kubenswrapper[4923]: I1128 11:11:53.946611 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-g2kmb" event={"ID":"b483d037-b692-45d5-bb83-02e029649100","Type":"ContainerStarted","Data":"9a8355dcfa6bc9435a28557eb1482aae9c2bcc6fc313c6f25f5440238e2ec4ca"} Nov 28 11:11:53 crc kubenswrapper[4923]: I1128 11:11:53.946806 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-g2kmb" event={"ID":"b483d037-b692-45d5-bb83-02e029649100","Type":"ContainerStarted","Data":"4bc18127f970ae8ef8dd07d3c7665ac82302fbf2949d8f06d09d734e34442ba7"} Nov 28 11:11:53 crc kubenswrapper[4923]: I1128 11:11:53.948860 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"ffd50eaf-eb37-40b2-80b0-a7f71f7371f8","Type":"ContainerStarted","Data":"c9f0deea7ba5b7efd2e37fe1291080f52b70eb99e2ebf922ad287e9e2ea10961"} Nov 28 11:11:53 crc kubenswrapper[4923]: I1128 11:11:53.990526 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/installer-9-crc" podStartSLOduration=6.990499929 podStartE2EDuration="6.990499929s" podCreationTimestamp="2025-11-28 11:11:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" 
lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:11:53.989054958 +0000 UTC m=+193.117739168" watchObservedRunningTime="2025-11-28 11:11:53.990499929 +0000 UTC m=+193.119184149" Nov 28 11:11:54 crc kubenswrapper[4923]: I1128 11:11:54.012680 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-g2kmb" podStartSLOduration=174.012660175 podStartE2EDuration="2m54.012660175s" podCreationTimestamp="2025-11-28 11:09:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:11:54.00928113 +0000 UTC m=+193.137965380" watchObservedRunningTime="2025-11-28 11:11:54.012660175 +0000 UTC m=+193.141344395" Nov 28 11:11:55 crc kubenswrapper[4923]: I1128 11:11:55.187965 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 28 11:11:55 crc kubenswrapper[4923]: I1128 11:11:55.317233 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/15d2531b-dca2-400b-8a64-63a946c97693-kube-api-access\") pod \"15d2531b-dca2-400b-8a64-63a946c97693\" (UID: \"15d2531b-dca2-400b-8a64-63a946c97693\") " Nov 28 11:11:55 crc kubenswrapper[4923]: I1128 11:11:55.317293 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/15d2531b-dca2-400b-8a64-63a946c97693-kubelet-dir\") pod \"15d2531b-dca2-400b-8a64-63a946c97693\" (UID: \"15d2531b-dca2-400b-8a64-63a946c97693\") " Nov 28 11:11:55 crc kubenswrapper[4923]: I1128 11:11:55.317506 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/15d2531b-dca2-400b-8a64-63a946c97693-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "15d2531b-dca2-400b-8a64-63a946c97693" (UID: "15d2531b-dca2-400b-8a64-63a946c97693"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 11:11:55 crc kubenswrapper[4923]: I1128 11:11:55.325323 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/15d2531b-dca2-400b-8a64-63a946c97693-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "15d2531b-dca2-400b-8a64-63a946c97693" (UID: "15d2531b-dca2-400b-8a64-63a946c97693"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:11:55 crc kubenswrapper[4923]: I1128 11:11:55.418227 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/15d2531b-dca2-400b-8a64-63a946c97693-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 28 11:11:55 crc kubenswrapper[4923]: I1128 11:11:55.418254 4923 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/15d2531b-dca2-400b-8a64-63a946c97693-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 28 11:11:55 crc kubenswrapper[4923]: I1128 11:11:55.961691 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"15d2531b-dca2-400b-8a64-63a946c97693","Type":"ContainerDied","Data":"af4418032727ef56a101a4d7167b99ff0dfd0e72babd942ff1afee7d58566a7b"} Nov 28 11:11:55 crc kubenswrapper[4923]: I1128 11:11:55.961947 4923 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="af4418032727ef56a101a4d7167b99ff0dfd0e72babd942ff1afee7d58566a7b" Nov 28 11:11:55 crc kubenswrapper[4923]: I1128 11:11:55.961758 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 28 11:12:03 crc kubenswrapper[4923]: I1128 11:12:03.860510 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-28cv6"] Nov 28 11:12:04 crc kubenswrapper[4923]: I1128 11:12:04.002194 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fv729" event={"ID":"2db4e8e9-8919-4b13-afef-835484cc865a","Type":"ContainerStarted","Data":"18f471e6bb8048080a79fac9e5f6ec4d5a9895606d9830c15af5f3ff4f1cda50"} Nov 28 11:12:05 crc kubenswrapper[4923]: I1128 11:12:05.010737 4923 generic.go:334] "Generic (PLEG): container finished" podID="2db4e8e9-8919-4b13-afef-835484cc865a" containerID="18f471e6bb8048080a79fac9e5f6ec4d5a9895606d9830c15af5f3ff4f1cda50" exitCode=0 Nov 28 11:12:05 crc kubenswrapper[4923]: I1128 11:12:05.010782 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fv729" event={"ID":"2db4e8e9-8919-4b13-afef-835484cc865a","Type":"ContainerDied","Data":"18f471e6bb8048080a79fac9e5f6ec4d5a9895606d9830c15af5f3ff4f1cda50"} Nov 28 11:12:06 crc kubenswrapper[4923]: I1128 11:12:06.017696 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fv729" event={"ID":"2db4e8e9-8919-4b13-afef-835484cc865a","Type":"ContainerStarted","Data":"96be14820db021fd54876d1dfaa8d5a3e3e3fa4bab6a6e5b157947cd7dff7fa1"} Nov 28 11:12:06 crc kubenswrapper[4923]: I1128 11:12:06.020689 4923 generic.go:334] "Generic (PLEG): container finished" podID="a6908a38-5adb-40c3-85e8-730eaa4937ef" containerID="2bd37d0797bd440a3d8260443928e2164e028b9a7a7b71341a218589a90d76c8" exitCode=0 Nov 28 11:12:06 crc kubenswrapper[4923]: I1128 11:12:06.020727 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-n2qp8" event={"ID":"a6908a38-5adb-40c3-85e8-730eaa4937ef","Type":"ContainerDied","Data":"2bd37d0797bd440a3d8260443928e2164e028b9a7a7b71341a218589a90d76c8"} Nov 28 11:12:06 crc kubenswrapper[4923]: I1128 11:12:06.024530 4923 generic.go:334] "Generic (PLEG): container finished" podID="e28cae41-ead3-4395-a457-3077c92068ca" containerID="33fa8b514aa8e1c4992dc062a341f8471806d64935278ff9d23c29d3e235959d" 
exitCode=0 Nov 28 11:12:06 crc kubenswrapper[4923]: I1128 11:12:06.024576 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tdzqr" event={"ID":"e28cae41-ead3-4395-a457-3077c92068ca","Type":"ContainerDied","Data":"33fa8b514aa8e1c4992dc062a341f8471806d64935278ff9d23c29d3e235959d"} Nov 28 11:12:06 crc kubenswrapper[4923]: I1128 11:12:06.026369 4923 generic.go:334] "Generic (PLEG): container finished" podID="4fa3aef4-051e-48bb-91a3-bd8bd4c2befb" containerID="474d877990879bb10f9af5ad927d4d81381e15b765fd460873134349006259fd" exitCode=0 Nov 28 11:12:06 crc kubenswrapper[4923]: I1128 11:12:06.026416 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h4kzb" event={"ID":"4fa3aef4-051e-48bb-91a3-bd8bd4c2befb","Type":"ContainerDied","Data":"474d877990879bb10f9af5ad927d4d81381e15b765fd460873134349006259fd"} Nov 28 11:12:07 crc kubenswrapper[4923]: I1128 11:12:07.032544 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qkkh6" event={"ID":"39eebecc-004d-445a-ac63-fad7bc311127","Type":"ContainerStarted","Data":"97dd0d28a0b522fde607b02a45bd9047813d56366c10ee03121fcce1ad26db65"} Nov 28 11:12:07 crc kubenswrapper[4923]: I1128 11:12:07.070245 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-fv729" podStartSLOduration=5.677689026 podStartE2EDuration="1m7.070230032s" podCreationTimestamp="2025-11-28 11:11:00 +0000 UTC" firstStartedPulling="2025-11-28 11:11:04.050922009 +0000 UTC m=+143.179606209" lastFinishedPulling="2025-11-28 11:12:05.443463005 +0000 UTC m=+204.572147215" observedRunningTime="2025-11-28 11:12:07.05123818 +0000 UTC m=+206.179922390" watchObservedRunningTime="2025-11-28 11:12:07.070230032 +0000 UTC m=+206.198914242" Nov 28 11:12:08 crc kubenswrapper[4923]: I1128 11:12:08.037885 4923 generic.go:334] "Generic (PLEG): container finished" podID="39eebecc-004d-445a-ac63-fad7bc311127" containerID="97dd0d28a0b522fde607b02a45bd9047813d56366c10ee03121fcce1ad26db65" exitCode=0 Nov 28 11:12:08 crc kubenswrapper[4923]: I1128 11:12:08.037951 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qkkh6" event={"ID":"39eebecc-004d-445a-ac63-fad7bc311127","Type":"ContainerDied","Data":"97dd0d28a0b522fde607b02a45bd9047813d56366c10ee03121fcce1ad26db65"} Nov 28 11:12:08 crc kubenswrapper[4923]: I1128 11:12:08.042817 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-n2qp8" event={"ID":"a6908a38-5adb-40c3-85e8-730eaa4937ef","Type":"ContainerStarted","Data":"f0425a114843f6e320533c00f2604a9fdeeca0c1005fb0a2f418a5ce1f933d2e"} Nov 28 11:12:08 crc kubenswrapper[4923]: I1128 11:12:08.045721 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h4kzb" event={"ID":"4fa3aef4-051e-48bb-91a3-bd8bd4c2befb","Type":"ContainerStarted","Data":"275497f17c7cd70692ddcfe3bd1041e81dc6e2a6076c1366a056832dd495d527"} Nov 28 11:12:09 crc kubenswrapper[4923]: I1128 11:12:09.069851 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-n2qp8" podStartSLOduration=5.220764366 podStartE2EDuration="1m9.069834638s" podCreationTimestamp="2025-11-28 11:11:00 +0000 UTC" firstStartedPulling="2025-11-28 11:11:02.720200233 +0000 UTC m=+141.848884443" lastFinishedPulling="2025-11-28 11:12:06.569270505 +0000 UTC m=+205.697954715" 
observedRunningTime="2025-11-28 11:12:09.069400746 +0000 UTC m=+208.198084946" watchObservedRunningTime="2025-11-28 11:12:09.069834638 +0000 UTC m=+208.198518848" Nov 28 11:12:09 crc kubenswrapper[4923]: I1128 11:12:09.092111 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-h4kzb" podStartSLOduration=5.508304809 podStartE2EDuration="1m8.092101254s" podCreationTimestamp="2025-11-28 11:11:01 +0000 UTC" firstStartedPulling="2025-11-28 11:11:03.978663606 +0000 UTC m=+143.107347816" lastFinishedPulling="2025-11-28 11:12:06.562460051 +0000 UTC m=+205.691144261" observedRunningTime="2025-11-28 11:12:09.089621393 +0000 UTC m=+208.218305603" watchObservedRunningTime="2025-11-28 11:12:09.092101254 +0000 UTC m=+208.220785464" Nov 28 11:12:10 crc kubenswrapper[4923]: I1128 11:12:10.353882 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-n2qp8" Nov 28 11:12:10 crc kubenswrapper[4923]: I1128 11:12:10.354273 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-n2qp8" Nov 28 11:12:10 crc kubenswrapper[4923]: I1128 11:12:10.861533 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-fv729" Nov 28 11:12:10 crc kubenswrapper[4923]: I1128 11:12:10.862007 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-fv729" Nov 28 11:12:10 crc kubenswrapper[4923]: I1128 11:12:10.915884 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-fv729" Nov 28 11:12:10 crc kubenswrapper[4923]: I1128 11:12:10.931887 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-n2qp8" Nov 28 11:12:11 crc kubenswrapper[4923]: I1128 11:12:11.092189 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-fv729" Nov 28 11:12:12 crc kubenswrapper[4923]: I1128 11:12:12.032252 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-fv729"] Nov 28 11:12:12 crc kubenswrapper[4923]: I1128 11:12:12.221094 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-h4kzb" Nov 28 11:12:12 crc kubenswrapper[4923]: I1128 11:12:12.221160 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-h4kzb" Nov 28 11:12:13 crc kubenswrapper[4923]: I1128 11:12:13.071720 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tdzqr" event={"ID":"e28cae41-ead3-4395-a457-3077c92068ca","Type":"ContainerStarted","Data":"59bfc149acda33960396f2a0210b66218a455e17699f1faaf34469c03a0adad0"} Nov 28 11:12:13 crc kubenswrapper[4923]: I1128 11:12:13.072069 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-fv729" podUID="2db4e8e9-8919-4b13-afef-835484cc865a" containerName="registry-server" containerID="cri-o://96be14820db021fd54876d1dfaa8d5a3e3e3fa4bab6a6e5b157947cd7dff7fa1" gracePeriod=2 Nov 28 11:12:13 crc kubenswrapper[4923]: I1128 11:12:13.266984 4923 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-h4kzb" 
podUID="4fa3aef4-051e-48bb-91a3-bd8bd4c2befb" containerName="registry-server" probeResult="failure" output=< Nov 28 11:12:13 crc kubenswrapper[4923]: timeout: failed to connect service ":50051" within 1s Nov 28 11:12:13 crc kubenswrapper[4923]: > Nov 28 11:12:14 crc kubenswrapper[4923]: I1128 11:12:14.026979 4923 patch_prober.go:28] interesting pod/machine-config-daemon-bwdth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 11:12:14 crc kubenswrapper[4923]: I1128 11:12:14.027058 4923 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 11:12:14 crc kubenswrapper[4923]: I1128 11:12:14.027122 4923 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" Nov 28 11:12:14 crc kubenswrapper[4923]: I1128 11:12:14.027958 4923 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"9e0494fbf37786a6c8b1524ab2642c29343c3cfef308a6f0988d59f375d732a9"} pod="openshift-machine-config-operator/machine-config-daemon-bwdth" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 11:12:14 crc kubenswrapper[4923]: I1128 11:12:14.028111 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" containerName="machine-config-daemon" containerID="cri-o://9e0494fbf37786a6c8b1524ab2642c29343c3cfef308a6f0988d59f375d732a9" gracePeriod=600 Nov 28 11:12:14 crc kubenswrapper[4923]: I1128 11:12:14.107324 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-tdzqr" podStartSLOduration=7.275709712 podStartE2EDuration="1m16.10730055s" podCreationTimestamp="2025-11-28 11:10:58 +0000 UTC" firstStartedPulling="2025-11-28 11:11:02.621995206 +0000 UTC m=+141.750679426" lastFinishedPulling="2025-11-28 11:12:11.453586054 +0000 UTC m=+210.582270264" observedRunningTime="2025-11-28 11:12:14.101683019 +0000 UTC m=+213.230367269" watchObservedRunningTime="2025-11-28 11:12:14.10730055 +0000 UTC m=+213.235984800" Nov 28 11:12:16 crc kubenswrapper[4923]: I1128 11:12:16.928455 4923 generic.go:334] "Generic (PLEG): container finished" podID="2db4e8e9-8919-4b13-afef-835484cc865a" containerID="96be14820db021fd54876d1dfaa8d5a3e3e3fa4bab6a6e5b157947cd7dff7fa1" exitCode=0 Nov 28 11:12:16 crc kubenswrapper[4923]: I1128 11:12:16.928547 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fv729" event={"ID":"2db4e8e9-8919-4b13-afef-835484cc865a","Type":"ContainerDied","Data":"96be14820db021fd54876d1dfaa8d5a3e3e3fa4bab6a6e5b157947cd7dff7fa1"} Nov 28 11:12:16 crc kubenswrapper[4923]: I1128 11:12:16.931352 4923 generic.go:334] "Generic (PLEG): container finished" podID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" containerID="9e0494fbf37786a6c8b1524ab2642c29343c3cfef308a6f0988d59f375d732a9" exitCode=0 Nov 28 11:12:16 crc kubenswrapper[4923]: I1128 11:12:16.931390 
4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" event={"ID":"092566f7-fc7d-4897-a1f2-4ecedcd3058e","Type":"ContainerDied","Data":"9e0494fbf37786a6c8b1524ab2642c29343c3cfef308a6f0988d59f375d732a9"} Nov 28 11:12:18 crc kubenswrapper[4923]: I1128 11:12:18.996168 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-tdzqr" Nov 28 11:12:18 crc kubenswrapper[4923]: I1128 11:12:18.996258 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-tdzqr" Nov 28 11:12:19 crc kubenswrapper[4923]: I1128 11:12:19.046918 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-tdzqr" Nov 28 11:12:19 crc kubenswrapper[4923]: I1128 11:12:19.940322 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fv729" Nov 28 11:12:19 crc kubenswrapper[4923]: I1128 11:12:19.969680 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fv729" Nov 28 11:12:19 crc kubenswrapper[4923]: I1128 11:12:19.969809 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fv729" event={"ID":"2db4e8e9-8919-4b13-afef-835484cc865a","Type":"ContainerDied","Data":"8b9ef4f3e6c7bd96976f6a30d9cf975dbd7f8b37f3ea266e95d2c52800960112"} Nov 28 11:12:19 crc kubenswrapper[4923]: I1128 11:12:19.969866 4923 scope.go:117] "RemoveContainer" containerID="96be14820db021fd54876d1dfaa8d5a3e3e3fa4bab6a6e5b157947cd7dff7fa1" Nov 28 11:12:19 crc kubenswrapper[4923]: I1128 11:12:19.970463 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q99hs\" (UniqueName: \"kubernetes.io/projected/2db4e8e9-8919-4b13-afef-835484cc865a-kube-api-access-q99hs\") pod \"2db4e8e9-8919-4b13-afef-835484cc865a\" (UID: \"2db4e8e9-8919-4b13-afef-835484cc865a\") " Nov 28 11:12:19 crc kubenswrapper[4923]: I1128 11:12:19.970544 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2db4e8e9-8919-4b13-afef-835484cc865a-utilities\") pod \"2db4e8e9-8919-4b13-afef-835484cc865a\" (UID: \"2db4e8e9-8919-4b13-afef-835484cc865a\") " Nov 28 11:12:19 crc kubenswrapper[4923]: I1128 11:12:19.970626 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2db4e8e9-8919-4b13-afef-835484cc865a-catalog-content\") pod \"2db4e8e9-8919-4b13-afef-835484cc865a\" (UID: \"2db4e8e9-8919-4b13-afef-835484cc865a\") " Nov 28 11:12:19 crc kubenswrapper[4923]: I1128 11:12:19.972594 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2db4e8e9-8919-4b13-afef-835484cc865a-utilities" (OuterVolumeSpecName: "utilities") pod "2db4e8e9-8919-4b13-afef-835484cc865a" (UID: "2db4e8e9-8919-4b13-afef-835484cc865a"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:12:19 crc kubenswrapper[4923]: I1128 11:12:19.989232 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2db4e8e9-8919-4b13-afef-835484cc865a-kube-api-access-q99hs" (OuterVolumeSpecName: "kube-api-access-q99hs") pod "2db4e8e9-8919-4b13-afef-835484cc865a" (UID: "2db4e8e9-8919-4b13-afef-835484cc865a"). InnerVolumeSpecName "kube-api-access-q99hs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:12:20 crc kubenswrapper[4923]: I1128 11:12:20.009679 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2db4e8e9-8919-4b13-afef-835484cc865a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2db4e8e9-8919-4b13-afef-835484cc865a" (UID: "2db4e8e9-8919-4b13-afef-835484cc865a"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:12:20 crc kubenswrapper[4923]: I1128 11:12:20.072829 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q99hs\" (UniqueName: \"kubernetes.io/projected/2db4e8e9-8919-4b13-afef-835484cc865a-kube-api-access-q99hs\") on node \"crc\" DevicePath \"\"" Nov 28 11:12:20 crc kubenswrapper[4923]: I1128 11:12:20.073142 4923 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2db4e8e9-8919-4b13-afef-835484cc865a-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 11:12:20 crc kubenswrapper[4923]: I1128 11:12:20.073163 4923 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2db4e8e9-8919-4b13-afef-835484cc865a-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 11:12:20 crc kubenswrapper[4923]: I1128 11:12:20.328427 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-fv729"] Nov 28 11:12:20 crc kubenswrapper[4923]: I1128 11:12:20.335174 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-fv729"] Nov 28 11:12:20 crc kubenswrapper[4923]: I1128 11:12:20.411217 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-n2qp8" Nov 28 11:12:20 crc kubenswrapper[4923]: I1128 11:12:20.466030 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-tdzqr" Nov 28 11:12:21 crc kubenswrapper[4923]: I1128 11:12:21.183606 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2db4e8e9-8919-4b13-afef-835484cc865a" path="/var/lib/kubelet/pods/2db4e8e9-8919-4b13-afef-835484cc865a/volumes" Nov 28 11:12:22 crc kubenswrapper[4923]: I1128 11:12:22.296456 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-h4kzb" Nov 28 11:12:22 crc kubenswrapper[4923]: I1128 11:12:22.356467 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-h4kzb" Nov 28 11:12:22 crc kubenswrapper[4923]: I1128 11:12:22.884917 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-tdzqr"] Nov 28 11:12:22 crc kubenswrapper[4923]: I1128 11:12:22.885274 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-tdzqr" podUID="e28cae41-ead3-4395-a457-3077c92068ca" 
containerName="registry-server" containerID="cri-o://59bfc149acda33960396f2a0210b66218a455e17699f1faaf34469c03a0adad0" gracePeriod=2 Nov 28 11:12:23 crc kubenswrapper[4923]: I1128 11:12:23.170257 4923 scope.go:117] "RemoveContainer" containerID="18f471e6bb8048080a79fac9e5f6ec4d5a9895606d9830c15af5f3ff4f1cda50" Nov 28 11:12:23 crc kubenswrapper[4923]: I1128 11:12:23.981697 4923 scope.go:117] "RemoveContainer" containerID="a9e8fe96817b2d49b052dfde01a746e54d90b02d1e6a38914d44dc0c0fd8c72b" Nov 28 11:12:24 crc kubenswrapper[4923]: I1128 11:12:23.999980 4923 generic.go:334] "Generic (PLEG): container finished" podID="e28cae41-ead3-4395-a457-3077c92068ca" containerID="59bfc149acda33960396f2a0210b66218a455e17699f1faaf34469c03a0adad0" exitCode=0 Nov 28 11:12:24 crc kubenswrapper[4923]: I1128 11:12:24.000077 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tdzqr" event={"ID":"e28cae41-ead3-4395-a457-3077c92068ca","Type":"ContainerDied","Data":"59bfc149acda33960396f2a0210b66218a455e17699f1faaf34469c03a0adad0"} Nov 28 11:12:24 crc kubenswrapper[4923]: I1128 11:12:24.259790 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-tdzqr" Nov 28 11:12:24 crc kubenswrapper[4923]: I1128 11:12:24.335719 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e28cae41-ead3-4395-a457-3077c92068ca-utilities\") pod \"e28cae41-ead3-4395-a457-3077c92068ca\" (UID: \"e28cae41-ead3-4395-a457-3077c92068ca\") " Nov 28 11:12:24 crc kubenswrapper[4923]: I1128 11:12:24.335996 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vrb2k\" (UniqueName: \"kubernetes.io/projected/e28cae41-ead3-4395-a457-3077c92068ca-kube-api-access-vrb2k\") pod \"e28cae41-ead3-4395-a457-3077c92068ca\" (UID: \"e28cae41-ead3-4395-a457-3077c92068ca\") " Nov 28 11:12:24 crc kubenswrapper[4923]: I1128 11:12:24.336045 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e28cae41-ead3-4395-a457-3077c92068ca-catalog-content\") pod \"e28cae41-ead3-4395-a457-3077c92068ca\" (UID: \"e28cae41-ead3-4395-a457-3077c92068ca\") " Nov 28 11:12:24 crc kubenswrapper[4923]: I1128 11:12:24.336949 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e28cae41-ead3-4395-a457-3077c92068ca-utilities" (OuterVolumeSpecName: "utilities") pod "e28cae41-ead3-4395-a457-3077c92068ca" (UID: "e28cae41-ead3-4395-a457-3077c92068ca"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:12:24 crc kubenswrapper[4923]: I1128 11:12:24.348136 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e28cae41-ead3-4395-a457-3077c92068ca-kube-api-access-vrb2k" (OuterVolumeSpecName: "kube-api-access-vrb2k") pod "e28cae41-ead3-4395-a457-3077c92068ca" (UID: "e28cae41-ead3-4395-a457-3077c92068ca"). InnerVolumeSpecName "kube-api-access-vrb2k". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:12:24 crc kubenswrapper[4923]: I1128 11:12:24.399551 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e28cae41-ead3-4395-a457-3077c92068ca-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e28cae41-ead3-4395-a457-3077c92068ca" (UID: "e28cae41-ead3-4395-a457-3077c92068ca"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:12:24 crc kubenswrapper[4923]: I1128 11:12:24.437281 4923 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e28cae41-ead3-4395-a457-3077c92068ca-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 11:12:24 crc kubenswrapper[4923]: I1128 11:12:24.437324 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vrb2k\" (UniqueName: \"kubernetes.io/projected/e28cae41-ead3-4395-a457-3077c92068ca-kube-api-access-vrb2k\") on node \"crc\" DevicePath \"\"" Nov 28 11:12:24 crc kubenswrapper[4923]: I1128 11:12:24.437336 4923 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e28cae41-ead3-4395-a457-3077c92068ca-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 11:12:25 crc kubenswrapper[4923]: I1128 11:12:25.015568 4923 generic.go:334] "Generic (PLEG): container finished" podID="84ce6a6f-d3d1-4ef9-8ca5-79dfa714a2b4" containerID="658ebd66c7b75847ddbaec1b023e59d3a812ea63a52fb0fecad9961e55c233b6" exitCode=0 Nov 28 11:12:25 crc kubenswrapper[4923]: I1128 11:12:25.015831 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l4xf8" event={"ID":"84ce6a6f-d3d1-4ef9-8ca5-79dfa714a2b4","Type":"ContainerDied","Data":"658ebd66c7b75847ddbaec1b023e59d3a812ea63a52fb0fecad9961e55c233b6"} Nov 28 11:12:25 crc kubenswrapper[4923]: I1128 11:12:25.025063 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tdzqr" event={"ID":"e28cae41-ead3-4395-a457-3077c92068ca","Type":"ContainerDied","Data":"19526b26813de4500fec40b55a09e3db8044eb0cf662eae1b0efbe1f0d2f91bf"} Nov 28 11:12:25 crc kubenswrapper[4923]: I1128 11:12:25.025131 4923 scope.go:117] "RemoveContainer" containerID="59bfc149acda33960396f2a0210b66218a455e17699f1faaf34469c03a0adad0" Nov 28 11:12:25 crc kubenswrapper[4923]: I1128 11:12:25.025302 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-tdzqr" Nov 28 11:12:25 crc kubenswrapper[4923]: I1128 11:12:25.038012 4923 generic.go:334] "Generic (PLEG): container finished" podID="f54a8053-81cc-429f-b68e-87a3fd245263" containerID="40e14a00bb193ffdf61d6605f389e306b9cbfe77a94e8fe5f6e4bb47beb27bae" exitCode=0 Nov 28 11:12:25 crc kubenswrapper[4923]: I1128 11:12:25.038114 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-js2pf" event={"ID":"f54a8053-81cc-429f-b68e-87a3fd245263","Type":"ContainerDied","Data":"40e14a00bb193ffdf61d6605f389e306b9cbfe77a94e8fe5f6e4bb47beb27bae"} Nov 28 11:12:25 crc kubenswrapper[4923]: I1128 11:12:25.046854 4923 generic.go:334] "Generic (PLEG): container finished" podID="cffefe1d-9522-408d-aadf-c688411908e1" containerID="9d3de31b4f8f9ffdad295a46f0feab01f0616585979132ebf82818b8b88cb244" exitCode=0 Nov 28 11:12:25 crc kubenswrapper[4923]: I1128 11:12:25.046966 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-46sx6" event={"ID":"cffefe1d-9522-408d-aadf-c688411908e1","Type":"ContainerDied","Data":"9d3de31b4f8f9ffdad295a46f0feab01f0616585979132ebf82818b8b88cb244"} Nov 28 11:12:25 crc kubenswrapper[4923]: I1128 11:12:25.061301 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qkkh6" event={"ID":"39eebecc-004d-445a-ac63-fad7bc311127","Type":"ContainerStarted","Data":"6cd659195ccf10b4581b41615cf0f9b3d06bad51789b24595e4edea2961793b8"} Nov 28 11:12:25 crc kubenswrapper[4923]: I1128 11:12:25.065266 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" event={"ID":"092566f7-fc7d-4897-a1f2-4ecedcd3058e","Type":"ContainerStarted","Data":"61edbc0d7fbd462cd0df9f2876de70a9446d33b2d98ecb642842c37e988e973d"} Nov 28 11:12:25 crc kubenswrapper[4923]: I1128 11:12:25.080349 4923 scope.go:117] "RemoveContainer" containerID="33fa8b514aa8e1c4992dc062a341f8471806d64935278ff9d23c29d3e235959d" Nov 28 11:12:25 crc kubenswrapper[4923]: I1128 11:12:25.119473 4923 scope.go:117] "RemoveContainer" containerID="2854176fb098059aedd7457ba3d68d85f2bf68ac49940a384325e1fc29bc5768" Nov 28 11:12:25 crc kubenswrapper[4923]: I1128 11:12:25.126792 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-tdzqr"] Nov 28 11:12:25 crc kubenswrapper[4923]: I1128 11:12:25.130160 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-tdzqr"] Nov 28 11:12:25 crc kubenswrapper[4923]: I1128 11:12:25.180610 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e28cae41-ead3-4395-a457-3077c92068ca" path="/var/lib/kubelet/pods/e28cae41-ead3-4395-a457-3077c92068ca/volumes" Nov 28 11:12:25 crc kubenswrapper[4923]: I1128 11:12:25.278963 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-qkkh6" podStartSLOduration=4.444639446 podStartE2EDuration="1m24.278911912s" podCreationTimestamp="2025-11-28 11:11:01 +0000 UTC" firstStartedPulling="2025-11-28 11:11:04.142691264 +0000 UTC m=+143.271375474" lastFinishedPulling="2025-11-28 11:12:23.97696369 +0000 UTC m=+223.105647940" observedRunningTime="2025-11-28 11:12:25.161137718 +0000 UTC m=+224.289822008" watchObservedRunningTime="2025-11-28 11:12:25.278911912 +0000 UTC m=+224.407596172" Nov 28 11:12:25 crc kubenswrapper[4923]: I1128 11:12:25.283572 4923 
kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-h4kzb"] Nov 28 11:12:25 crc kubenswrapper[4923]: I1128 11:12:25.283920 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-h4kzb" podUID="4fa3aef4-051e-48bb-91a3-bd8bd4c2befb" containerName="registry-server" containerID="cri-o://275497f17c7cd70692ddcfe3bd1041e81dc6e2a6076c1366a056832dd495d527" gracePeriod=2 Nov 28 11:12:25 crc kubenswrapper[4923]: I1128 11:12:25.738393 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-h4kzb" Nov 28 11:12:25 crc kubenswrapper[4923]: I1128 11:12:25.761603 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4fa3aef4-051e-48bb-91a3-bd8bd4c2befb-utilities\") pod \"4fa3aef4-051e-48bb-91a3-bd8bd4c2befb\" (UID: \"4fa3aef4-051e-48bb-91a3-bd8bd4c2befb\") " Nov 28 11:12:25 crc kubenswrapper[4923]: I1128 11:12:25.763367 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4fa3aef4-051e-48bb-91a3-bd8bd4c2befb-utilities" (OuterVolumeSpecName: "utilities") pod "4fa3aef4-051e-48bb-91a3-bd8bd4c2befb" (UID: "4fa3aef4-051e-48bb-91a3-bd8bd4c2befb"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:12:25 crc kubenswrapper[4923]: I1128 11:12:25.763423 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v77lg\" (UniqueName: \"kubernetes.io/projected/4fa3aef4-051e-48bb-91a3-bd8bd4c2befb-kube-api-access-v77lg\") pod \"4fa3aef4-051e-48bb-91a3-bd8bd4c2befb\" (UID: \"4fa3aef4-051e-48bb-91a3-bd8bd4c2befb\") " Nov 28 11:12:25 crc kubenswrapper[4923]: I1128 11:12:25.763456 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4fa3aef4-051e-48bb-91a3-bd8bd4c2befb-catalog-content\") pod \"4fa3aef4-051e-48bb-91a3-bd8bd4c2befb\" (UID: \"4fa3aef4-051e-48bb-91a3-bd8bd4c2befb\") " Nov 28 11:12:25 crc kubenswrapper[4923]: I1128 11:12:25.764633 4923 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4fa3aef4-051e-48bb-91a3-bd8bd4c2befb-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 11:12:25 crc kubenswrapper[4923]: I1128 11:12:25.769572 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4fa3aef4-051e-48bb-91a3-bd8bd4c2befb-kube-api-access-v77lg" (OuterVolumeSpecName: "kube-api-access-v77lg") pod "4fa3aef4-051e-48bb-91a3-bd8bd4c2befb" (UID: "4fa3aef4-051e-48bb-91a3-bd8bd4c2befb"). InnerVolumeSpecName "kube-api-access-v77lg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:12:25 crc kubenswrapper[4923]: I1128 11:12:25.866056 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v77lg\" (UniqueName: \"kubernetes.io/projected/4fa3aef4-051e-48bb-91a3-bd8bd4c2befb-kube-api-access-v77lg\") on node \"crc\" DevicePath \"\"" Nov 28 11:12:25 crc kubenswrapper[4923]: I1128 11:12:25.885352 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4fa3aef4-051e-48bb-91a3-bd8bd4c2befb-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4fa3aef4-051e-48bb-91a3-bd8bd4c2befb" (UID: "4fa3aef4-051e-48bb-91a3-bd8bd4c2befb"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:12:25 crc kubenswrapper[4923]: I1128 11:12:25.967964 4923 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4fa3aef4-051e-48bb-91a3-bd8bd4c2befb-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 11:12:26 crc kubenswrapper[4923]: I1128 11:12:26.072586 4923 generic.go:334] "Generic (PLEG): container finished" podID="4fa3aef4-051e-48bb-91a3-bd8bd4c2befb" containerID="275497f17c7cd70692ddcfe3bd1041e81dc6e2a6076c1366a056832dd495d527" exitCode=0 Nov 28 11:12:26 crc kubenswrapper[4923]: I1128 11:12:26.072634 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-h4kzb" Nov 28 11:12:26 crc kubenswrapper[4923]: I1128 11:12:26.072651 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h4kzb" event={"ID":"4fa3aef4-051e-48bb-91a3-bd8bd4c2befb","Type":"ContainerDied","Data":"275497f17c7cd70692ddcfe3bd1041e81dc6e2a6076c1366a056832dd495d527"} Nov 28 11:12:26 crc kubenswrapper[4923]: I1128 11:12:26.072684 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h4kzb" event={"ID":"4fa3aef4-051e-48bb-91a3-bd8bd4c2befb","Type":"ContainerDied","Data":"dfd0457d174dcbe33afcc6f20723ef67e6f884111417ff58f5eef940dcee9af2"} Nov 28 11:12:26 crc kubenswrapper[4923]: I1128 11:12:26.072702 4923 scope.go:117] "RemoveContainer" containerID="275497f17c7cd70692ddcfe3bd1041e81dc6e2a6076c1366a056832dd495d527" Nov 28 11:12:26 crc kubenswrapper[4923]: I1128 11:12:26.074840 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-46sx6" event={"ID":"cffefe1d-9522-408d-aadf-c688411908e1","Type":"ContainerStarted","Data":"f775975c413550383947df61c989c197028a0a3e4706576ef6832f6d44a60806"} Nov 28 11:12:26 crc kubenswrapper[4923]: I1128 11:12:26.081727 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-js2pf" event={"ID":"f54a8053-81cc-429f-b68e-87a3fd245263","Type":"ContainerStarted","Data":"357c8fb2d02d63ba1f049ffe71339e08055730c4c91838dcf695fa385e8077bf"} Nov 28 11:12:26 crc kubenswrapper[4923]: I1128 11:12:26.084133 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l4xf8" event={"ID":"84ce6a6f-d3d1-4ef9-8ca5-79dfa714a2b4","Type":"ContainerStarted","Data":"79306c65c47cb373aa6b330add93214a34b14d596e50348dc5ba71d68c337ffa"} Nov 28 11:12:26 crc kubenswrapper[4923]: I1128 11:12:26.088672 4923 scope.go:117] "RemoveContainer" containerID="474d877990879bb10f9af5ad927d4d81381e15b765fd460873134349006259fd" Nov 28 11:12:26 crc kubenswrapper[4923]: I1128 11:12:26.103411 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-46sx6" podStartSLOduration=5.13297223 podStartE2EDuration="1m28.103386348s" podCreationTimestamp="2025-11-28 11:10:58 +0000 UTC" firstStartedPulling="2025-11-28 11:11:02.652469167 +0000 UTC m=+141.781153377" lastFinishedPulling="2025-11-28 11:12:25.622883285 +0000 UTC m=+224.751567495" observedRunningTime="2025-11-28 11:12:26.09436602 +0000 UTC m=+225.223050230" watchObservedRunningTime="2025-11-28 11:12:26.103386348 +0000 UTC m=+225.232070578" Nov 28 11:12:26 crc kubenswrapper[4923]: I1128 11:12:26.107079 4923 scope.go:117] "RemoveContainer" containerID="06bfba2db2bce8e36e6fd4169013d80d97f1685ab3f84dfb049ad064eba37e31" 
Nov 28 11:12:26 crc kubenswrapper[4923]: I1128 11:12:26.127031 4923 scope.go:117] "RemoveContainer" containerID="275497f17c7cd70692ddcfe3bd1041e81dc6e2a6076c1366a056832dd495d527" Nov 28 11:12:26 crc kubenswrapper[4923]: E1128 11:12:26.127416 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"275497f17c7cd70692ddcfe3bd1041e81dc6e2a6076c1366a056832dd495d527\": container with ID starting with 275497f17c7cd70692ddcfe3bd1041e81dc6e2a6076c1366a056832dd495d527 not found: ID does not exist" containerID="275497f17c7cd70692ddcfe3bd1041e81dc6e2a6076c1366a056832dd495d527" Nov 28 11:12:26 crc kubenswrapper[4923]: I1128 11:12:26.127443 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"275497f17c7cd70692ddcfe3bd1041e81dc6e2a6076c1366a056832dd495d527"} err="failed to get container status \"275497f17c7cd70692ddcfe3bd1041e81dc6e2a6076c1366a056832dd495d527\": rpc error: code = NotFound desc = could not find container \"275497f17c7cd70692ddcfe3bd1041e81dc6e2a6076c1366a056832dd495d527\": container with ID starting with 275497f17c7cd70692ddcfe3bd1041e81dc6e2a6076c1366a056832dd495d527 not found: ID does not exist" Nov 28 11:12:26 crc kubenswrapper[4923]: I1128 11:12:26.127463 4923 scope.go:117] "RemoveContainer" containerID="474d877990879bb10f9af5ad927d4d81381e15b765fd460873134349006259fd" Nov 28 11:12:26 crc kubenswrapper[4923]: E1128 11:12:26.127646 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"474d877990879bb10f9af5ad927d4d81381e15b765fd460873134349006259fd\": container with ID starting with 474d877990879bb10f9af5ad927d4d81381e15b765fd460873134349006259fd not found: ID does not exist" containerID="474d877990879bb10f9af5ad927d4d81381e15b765fd460873134349006259fd" Nov 28 11:12:26 crc kubenswrapper[4923]: I1128 11:12:26.127661 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"474d877990879bb10f9af5ad927d4d81381e15b765fd460873134349006259fd"} err="failed to get container status \"474d877990879bb10f9af5ad927d4d81381e15b765fd460873134349006259fd\": rpc error: code = NotFound desc = could not find container \"474d877990879bb10f9af5ad927d4d81381e15b765fd460873134349006259fd\": container with ID starting with 474d877990879bb10f9af5ad927d4d81381e15b765fd460873134349006259fd not found: ID does not exist" Nov 28 11:12:26 crc kubenswrapper[4923]: I1128 11:12:26.127673 4923 scope.go:117] "RemoveContainer" containerID="06bfba2db2bce8e36e6fd4169013d80d97f1685ab3f84dfb049ad064eba37e31" Nov 28 11:12:26 crc kubenswrapper[4923]: E1128 11:12:26.127830 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"06bfba2db2bce8e36e6fd4169013d80d97f1685ab3f84dfb049ad064eba37e31\": container with ID starting with 06bfba2db2bce8e36e6fd4169013d80d97f1685ab3f84dfb049ad064eba37e31 not found: ID does not exist" containerID="06bfba2db2bce8e36e6fd4169013d80d97f1685ab3f84dfb049ad064eba37e31" Nov 28 11:12:26 crc kubenswrapper[4923]: I1128 11:12:26.127844 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"06bfba2db2bce8e36e6fd4169013d80d97f1685ab3f84dfb049ad064eba37e31"} err="failed to get container status \"06bfba2db2bce8e36e6fd4169013d80d97f1685ab3f84dfb049ad064eba37e31\": rpc error: code = NotFound desc = could not find container 
\"06bfba2db2bce8e36e6fd4169013d80d97f1685ab3f84dfb049ad064eba37e31\": container with ID starting with 06bfba2db2bce8e36e6fd4169013d80d97f1685ab3f84dfb049ad064eba37e31 not found: ID does not exist" Nov 28 11:12:26 crc kubenswrapper[4923]: I1128 11:12:26.142400 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-l4xf8" podStartSLOduration=4.890215801 podStartE2EDuration="1m28.142387372s" podCreationTimestamp="2025-11-28 11:10:58 +0000 UTC" firstStartedPulling="2025-11-28 11:11:02.4633434 +0000 UTC m=+141.592027610" lastFinishedPulling="2025-11-28 11:12:25.715514971 +0000 UTC m=+224.844199181" observedRunningTime="2025-11-28 11:12:26.14057585 +0000 UTC m=+225.269260060" watchObservedRunningTime="2025-11-28 11:12:26.142387372 +0000 UTC m=+225.271071582" Nov 28 11:12:26 crc kubenswrapper[4923]: I1128 11:12:26.144596 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-js2pf" podStartSLOduration=5.286231923 podStartE2EDuration="1m28.144589624s" podCreationTimestamp="2025-11-28 11:10:58 +0000 UTC" firstStartedPulling="2025-11-28 11:11:02.698151709 +0000 UTC m=+141.826835919" lastFinishedPulling="2025-11-28 11:12:25.55650937 +0000 UTC m=+224.685193620" observedRunningTime="2025-11-28 11:12:26.113192838 +0000 UTC m=+225.241877068" watchObservedRunningTime="2025-11-28 11:12:26.144589624 +0000 UTC m=+225.273273834" Nov 28 11:12:26 crc kubenswrapper[4923]: I1128 11:12:26.166300 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-h4kzb"] Nov 28 11:12:26 crc kubenswrapper[4923]: I1128 11:12:26.170332 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-h4kzb"] Nov 28 11:12:27 crc kubenswrapper[4923]: I1128 11:12:27.174746 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4fa3aef4-051e-48bb-91a3-bd8bd4c2befb" path="/var/lib/kubelet/pods/4fa3aef4-051e-48bb-91a3-bd8bd4c2befb/volumes" Nov 28 11:12:28 crc kubenswrapper[4923]: I1128 11:12:28.605140 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-l4xf8" Nov 28 11:12:28 crc kubenswrapper[4923]: I1128 11:12:28.605546 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-l4xf8" Nov 28 11:12:28 crc kubenswrapper[4923]: I1128 11:12:28.655476 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-46sx6" Nov 28 11:12:28 crc kubenswrapper[4923]: I1128 11:12:28.656568 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-46sx6" Nov 28 11:12:28 crc kubenswrapper[4923]: I1128 11:12:28.679532 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-l4xf8" Nov 28 11:12:28 crc kubenswrapper[4923]: I1128 11:12:28.708858 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-46sx6" Nov 28 11:12:28 crc kubenswrapper[4923]: I1128 11:12:28.803520 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-js2pf" Nov 28 11:12:28 crc kubenswrapper[4923]: I1128 11:12:28.803578 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openshift-marketplace/certified-operators-js2pf" Nov 28 11:12:28 crc kubenswrapper[4923]: I1128 11:12:28.851834 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-js2pf" Nov 28 11:12:28 crc kubenswrapper[4923]: I1128 11:12:28.883506 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-28cv6" podUID="ad44a891-fc97-4154-8f93-bbd276c5c18a" containerName="oauth-openshift" containerID="cri-o://e1944c393d47c7de969ae5dc55ca1f3f4d7282cf8018157344ba21b62dcbfef7" gracePeriod=15 Nov 28 11:12:29 crc kubenswrapper[4923]: I1128 11:12:29.103010 4923 generic.go:334] "Generic (PLEG): container finished" podID="ad44a891-fc97-4154-8f93-bbd276c5c18a" containerID="e1944c393d47c7de969ae5dc55ca1f3f4d7282cf8018157344ba21b62dcbfef7" exitCode=0 Nov 28 11:12:29 crc kubenswrapper[4923]: I1128 11:12:29.103175 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-28cv6" event={"ID":"ad44a891-fc97-4154-8f93-bbd276c5c18a","Type":"ContainerDied","Data":"e1944c393d47c7de969ae5dc55ca1f3f4d7282cf8018157344ba21b62dcbfef7"} Nov 28 11:12:29 crc kubenswrapper[4923]: I1128 11:12:29.369548 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-28cv6" Nov 28 11:12:29 crc kubenswrapper[4923]: I1128 11:12:29.413132 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/ad44a891-fc97-4154-8f93-bbd276c5c18a-v4-0-config-system-session\") pod \"ad44a891-fc97-4154-8f93-bbd276c5c18a\" (UID: \"ad44a891-fc97-4154-8f93-bbd276c5c18a\") " Nov 28 11:12:29 crc kubenswrapper[4923]: I1128 11:12:29.413185 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/ad44a891-fc97-4154-8f93-bbd276c5c18a-v4-0-config-user-template-error\") pod \"ad44a891-fc97-4154-8f93-bbd276c5c18a\" (UID: \"ad44a891-fc97-4154-8f93-bbd276c5c18a\") " Nov 28 11:12:29 crc kubenswrapper[4923]: I1128 11:12:29.413220 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/ad44a891-fc97-4154-8f93-bbd276c5c18a-v4-0-config-system-serving-cert\") pod \"ad44a891-fc97-4154-8f93-bbd276c5c18a\" (UID: \"ad44a891-fc97-4154-8f93-bbd276c5c18a\") " Nov 28 11:12:29 crc kubenswrapper[4923]: I1128 11:12:29.413241 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/ad44a891-fc97-4154-8f93-bbd276c5c18a-v4-0-config-system-service-ca\") pod \"ad44a891-fc97-4154-8f93-bbd276c5c18a\" (UID: \"ad44a891-fc97-4154-8f93-bbd276c5c18a\") " Nov 28 11:12:29 crc kubenswrapper[4923]: I1128 11:12:29.413288 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/ad44a891-fc97-4154-8f93-bbd276c5c18a-v4-0-config-system-cliconfig\") pod \"ad44a891-fc97-4154-8f93-bbd276c5c18a\" (UID: \"ad44a891-fc97-4154-8f93-bbd276c5c18a\") " Nov 28 11:12:29 crc kubenswrapper[4923]: I1128 11:12:29.413318 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: 
\"kubernetes.io/secret/ad44a891-fc97-4154-8f93-bbd276c5c18a-v4-0-config-system-router-certs\") pod \"ad44a891-fc97-4154-8f93-bbd276c5c18a\" (UID: \"ad44a891-fc97-4154-8f93-bbd276c5c18a\") " Nov 28 11:12:29 crc kubenswrapper[4923]: I1128 11:12:29.413348 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/ad44a891-fc97-4154-8f93-bbd276c5c18a-audit-policies\") pod \"ad44a891-fc97-4154-8f93-bbd276c5c18a\" (UID: \"ad44a891-fc97-4154-8f93-bbd276c5c18a\") " Nov 28 11:12:29 crc kubenswrapper[4923]: I1128 11:12:29.413369 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/ad44a891-fc97-4154-8f93-bbd276c5c18a-audit-dir\") pod \"ad44a891-fc97-4154-8f93-bbd276c5c18a\" (UID: \"ad44a891-fc97-4154-8f93-bbd276c5c18a\") " Nov 28 11:12:29 crc kubenswrapper[4923]: I1128 11:12:29.413393 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ad44a891-fc97-4154-8f93-bbd276c5c18a-v4-0-config-system-trusted-ca-bundle\") pod \"ad44a891-fc97-4154-8f93-bbd276c5c18a\" (UID: \"ad44a891-fc97-4154-8f93-bbd276c5c18a\") " Nov 28 11:12:29 crc kubenswrapper[4923]: I1128 11:12:29.413429 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/ad44a891-fc97-4154-8f93-bbd276c5c18a-v4-0-config-user-idp-0-file-data\") pod \"ad44a891-fc97-4154-8f93-bbd276c5c18a\" (UID: \"ad44a891-fc97-4154-8f93-bbd276c5c18a\") " Nov 28 11:12:29 crc kubenswrapper[4923]: I1128 11:12:29.413450 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/ad44a891-fc97-4154-8f93-bbd276c5c18a-v4-0-config-system-ocp-branding-template\") pod \"ad44a891-fc97-4154-8f93-bbd276c5c18a\" (UID: \"ad44a891-fc97-4154-8f93-bbd276c5c18a\") " Nov 28 11:12:29 crc kubenswrapper[4923]: I1128 11:12:29.413477 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7vtcd\" (UniqueName: \"kubernetes.io/projected/ad44a891-fc97-4154-8f93-bbd276c5c18a-kube-api-access-7vtcd\") pod \"ad44a891-fc97-4154-8f93-bbd276c5c18a\" (UID: \"ad44a891-fc97-4154-8f93-bbd276c5c18a\") " Nov 28 11:12:29 crc kubenswrapper[4923]: I1128 11:12:29.413514 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/ad44a891-fc97-4154-8f93-bbd276c5c18a-v4-0-config-user-template-login\") pod \"ad44a891-fc97-4154-8f93-bbd276c5c18a\" (UID: \"ad44a891-fc97-4154-8f93-bbd276c5c18a\") " Nov 28 11:12:29 crc kubenswrapper[4923]: I1128 11:12:29.413537 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/ad44a891-fc97-4154-8f93-bbd276c5c18a-v4-0-config-user-template-provider-selection\") pod \"ad44a891-fc97-4154-8f93-bbd276c5c18a\" (UID: \"ad44a891-fc97-4154-8f93-bbd276c5c18a\") " Nov 28 11:12:29 crc kubenswrapper[4923]: I1128 11:12:29.414690 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ad44a891-fc97-4154-8f93-bbd276c5c18a-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod 
"ad44a891-fc97-4154-8f93-bbd276c5c18a" (UID: "ad44a891-fc97-4154-8f93-bbd276c5c18a"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:12:29 crc kubenswrapper[4923]: I1128 11:12:29.414767 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ad44a891-fc97-4154-8f93-bbd276c5c18a-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "ad44a891-fc97-4154-8f93-bbd276c5c18a" (UID: "ad44a891-fc97-4154-8f93-bbd276c5c18a"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:12:29 crc kubenswrapper[4923]: I1128 11:12:29.415121 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ad44a891-fc97-4154-8f93-bbd276c5c18a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "ad44a891-fc97-4154-8f93-bbd276c5c18a" (UID: "ad44a891-fc97-4154-8f93-bbd276c5c18a"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:12:29 crc kubenswrapper[4923]: I1128 11:12:29.415155 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ad44a891-fc97-4154-8f93-bbd276c5c18a-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "ad44a891-fc97-4154-8f93-bbd276c5c18a" (UID: "ad44a891-fc97-4154-8f93-bbd276c5c18a"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 11:12:29 crc kubenswrapper[4923]: I1128 11:12:29.415489 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ad44a891-fc97-4154-8f93-bbd276c5c18a-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "ad44a891-fc97-4154-8f93-bbd276c5c18a" (UID: "ad44a891-fc97-4154-8f93-bbd276c5c18a"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:12:29 crc kubenswrapper[4923]: I1128 11:12:29.420452 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ad44a891-fc97-4154-8f93-bbd276c5c18a-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "ad44a891-fc97-4154-8f93-bbd276c5c18a" (UID: "ad44a891-fc97-4154-8f93-bbd276c5c18a"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:12:29 crc kubenswrapper[4923]: I1128 11:12:29.420811 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ad44a891-fc97-4154-8f93-bbd276c5c18a-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "ad44a891-fc97-4154-8f93-bbd276c5c18a" (UID: "ad44a891-fc97-4154-8f93-bbd276c5c18a"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:12:29 crc kubenswrapper[4923]: I1128 11:12:29.421238 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ad44a891-fc97-4154-8f93-bbd276c5c18a-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "ad44a891-fc97-4154-8f93-bbd276c5c18a" (UID: "ad44a891-fc97-4154-8f93-bbd276c5c18a"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:12:29 crc kubenswrapper[4923]: I1128 11:12:29.422314 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ad44a891-fc97-4154-8f93-bbd276c5c18a-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "ad44a891-fc97-4154-8f93-bbd276c5c18a" (UID: "ad44a891-fc97-4154-8f93-bbd276c5c18a"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:12:29 crc kubenswrapper[4923]: I1128 11:12:29.422521 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ad44a891-fc97-4154-8f93-bbd276c5c18a-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "ad44a891-fc97-4154-8f93-bbd276c5c18a" (UID: "ad44a891-fc97-4154-8f93-bbd276c5c18a"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:12:29 crc kubenswrapper[4923]: I1128 11:12:29.423642 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ad44a891-fc97-4154-8f93-bbd276c5c18a-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "ad44a891-fc97-4154-8f93-bbd276c5c18a" (UID: "ad44a891-fc97-4154-8f93-bbd276c5c18a"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:12:29 crc kubenswrapper[4923]: I1128 11:12:29.423876 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ad44a891-fc97-4154-8f93-bbd276c5c18a-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "ad44a891-fc97-4154-8f93-bbd276c5c18a" (UID: "ad44a891-fc97-4154-8f93-bbd276c5c18a"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:12:29 crc kubenswrapper[4923]: I1128 11:12:29.424008 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ad44a891-fc97-4154-8f93-bbd276c5c18a-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "ad44a891-fc97-4154-8f93-bbd276c5c18a" (UID: "ad44a891-fc97-4154-8f93-bbd276c5c18a"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:12:29 crc kubenswrapper[4923]: I1128 11:12:29.426571 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ad44a891-fc97-4154-8f93-bbd276c5c18a-kube-api-access-7vtcd" (OuterVolumeSpecName: "kube-api-access-7vtcd") pod "ad44a891-fc97-4154-8f93-bbd276c5c18a" (UID: "ad44a891-fc97-4154-8f93-bbd276c5c18a"). InnerVolumeSpecName "kube-api-access-7vtcd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:12:29 crc kubenswrapper[4923]: I1128 11:12:29.514506 4923 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/ad44a891-fc97-4154-8f93-bbd276c5c18a-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Nov 28 11:12:29 crc kubenswrapper[4923]: I1128 11:12:29.514544 4923 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/ad44a891-fc97-4154-8f93-bbd276c5c18a-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Nov 28 11:12:29 crc kubenswrapper[4923]: I1128 11:12:29.514559 4923 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/ad44a891-fc97-4154-8f93-bbd276c5c18a-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Nov 28 11:12:29 crc kubenswrapper[4923]: I1128 11:12:29.514572 4923 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/ad44a891-fc97-4154-8f93-bbd276c5c18a-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Nov 28 11:12:29 crc kubenswrapper[4923]: I1128 11:12:29.514584 4923 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/ad44a891-fc97-4154-8f93-bbd276c5c18a-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Nov 28 11:12:29 crc kubenswrapper[4923]: I1128 11:12:29.514597 4923 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/ad44a891-fc97-4154-8f93-bbd276c5c18a-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 11:12:29 crc kubenswrapper[4923]: I1128 11:12:29.514609 4923 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/ad44a891-fc97-4154-8f93-bbd276c5c18a-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Nov 28 11:12:29 crc kubenswrapper[4923]: I1128 11:12:29.514621 4923 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/ad44a891-fc97-4154-8f93-bbd276c5c18a-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Nov 28 11:12:29 crc kubenswrapper[4923]: I1128 11:12:29.514632 4923 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/ad44a891-fc97-4154-8f93-bbd276c5c18a-audit-dir\") on node \"crc\" DevicePath \"\"" Nov 28 11:12:29 crc kubenswrapper[4923]: I1128 11:12:29.514644 4923 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/ad44a891-fc97-4154-8f93-bbd276c5c18a-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 28 11:12:29 crc kubenswrapper[4923]: I1128 11:12:29.514655 4923 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ad44a891-fc97-4154-8f93-bbd276c5c18a-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 11:12:29 crc kubenswrapper[4923]: I1128 11:12:29.514666 4923 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: 
\"kubernetes.io/secret/ad44a891-fc97-4154-8f93-bbd276c5c18a-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Nov 28 11:12:29 crc kubenswrapper[4923]: I1128 11:12:29.514679 4923 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/ad44a891-fc97-4154-8f93-bbd276c5c18a-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Nov 28 11:12:29 crc kubenswrapper[4923]: I1128 11:12:29.514690 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7vtcd\" (UniqueName: \"kubernetes.io/projected/ad44a891-fc97-4154-8f93-bbd276c5c18a-kube-api-access-7vtcd\") on node \"crc\" DevicePath \"\"" Nov 28 11:12:30 crc kubenswrapper[4923]: I1128 11:12:30.109716 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-28cv6" event={"ID":"ad44a891-fc97-4154-8f93-bbd276c5c18a","Type":"ContainerDied","Data":"8f8f69906629ca0b0b366dcfd7cd8144a8f6360bde5e04bf377a4813c6416c90"} Nov 28 11:12:30 crc kubenswrapper[4923]: I1128 11:12:30.109734 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-28cv6" Nov 28 11:12:30 crc kubenswrapper[4923]: I1128 11:12:30.110858 4923 scope.go:117] "RemoveContainer" containerID="e1944c393d47c7de969ae5dc55ca1f3f4d7282cf8018157344ba21b62dcbfef7" Nov 28 11:12:30 crc kubenswrapper[4923]: I1128 11:12:30.159641 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-28cv6"] Nov 28 11:12:30 crc kubenswrapper[4923]: I1128 11:12:30.163433 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-28cv6"] Nov 28 11:12:30 crc kubenswrapper[4923]: I1128 11:12:30.177282 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-46sx6" Nov 28 11:12:30 crc kubenswrapper[4923]: I1128 11:12:30.722072 4923 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Nov 28 11:12:30 crc kubenswrapper[4923]: E1128 11:12:30.722394 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e28cae41-ead3-4395-a457-3077c92068ca" containerName="registry-server" Nov 28 11:12:30 crc kubenswrapper[4923]: I1128 11:12:30.722416 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="e28cae41-ead3-4395-a457-3077c92068ca" containerName="registry-server" Nov 28 11:12:30 crc kubenswrapper[4923]: E1128 11:12:30.722437 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2db4e8e9-8919-4b13-afef-835484cc865a" containerName="registry-server" Nov 28 11:12:30 crc kubenswrapper[4923]: I1128 11:12:30.722450 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="2db4e8e9-8919-4b13-afef-835484cc865a" containerName="registry-server" Nov 28 11:12:30 crc kubenswrapper[4923]: E1128 11:12:30.722468 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4fa3aef4-051e-48bb-91a3-bd8bd4c2befb" containerName="extract-content" Nov 28 11:12:30 crc kubenswrapper[4923]: I1128 11:12:30.722482 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="4fa3aef4-051e-48bb-91a3-bd8bd4c2befb" containerName="extract-content" Nov 28 11:12:30 crc kubenswrapper[4923]: E1128 11:12:30.722503 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4fa3aef4-051e-48bb-91a3-bd8bd4c2befb" 
containerName="registry-server" Nov 28 11:12:30 crc kubenswrapper[4923]: I1128 11:12:30.722516 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="4fa3aef4-051e-48bb-91a3-bd8bd4c2befb" containerName="registry-server" Nov 28 11:12:30 crc kubenswrapper[4923]: E1128 11:12:30.722535 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="15d2531b-dca2-400b-8a64-63a946c97693" containerName="pruner" Nov 28 11:12:30 crc kubenswrapper[4923]: I1128 11:12:30.722548 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="15d2531b-dca2-400b-8a64-63a946c97693" containerName="pruner" Nov 28 11:12:30 crc kubenswrapper[4923]: E1128 11:12:30.722565 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e28cae41-ead3-4395-a457-3077c92068ca" containerName="extract-content" Nov 28 11:12:30 crc kubenswrapper[4923]: I1128 11:12:30.722578 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="e28cae41-ead3-4395-a457-3077c92068ca" containerName="extract-content" Nov 28 11:12:30 crc kubenswrapper[4923]: E1128 11:12:30.722596 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e28cae41-ead3-4395-a457-3077c92068ca" containerName="extract-utilities" Nov 28 11:12:30 crc kubenswrapper[4923]: I1128 11:12:30.722610 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="e28cae41-ead3-4395-a457-3077c92068ca" containerName="extract-utilities" Nov 28 11:12:30 crc kubenswrapper[4923]: E1128 11:12:30.722630 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2db4e8e9-8919-4b13-afef-835484cc865a" containerName="extract-utilities" Nov 28 11:12:30 crc kubenswrapper[4923]: I1128 11:12:30.722642 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="2db4e8e9-8919-4b13-afef-835484cc865a" containerName="extract-utilities" Nov 28 11:12:30 crc kubenswrapper[4923]: E1128 11:12:30.722660 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4fa3aef4-051e-48bb-91a3-bd8bd4c2befb" containerName="extract-utilities" Nov 28 11:12:30 crc kubenswrapper[4923]: I1128 11:12:30.722672 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="4fa3aef4-051e-48bb-91a3-bd8bd4c2befb" containerName="extract-utilities" Nov 28 11:12:30 crc kubenswrapper[4923]: E1128 11:12:30.722688 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2db4e8e9-8919-4b13-afef-835484cc865a" containerName="extract-content" Nov 28 11:12:30 crc kubenswrapper[4923]: I1128 11:12:30.722701 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="2db4e8e9-8919-4b13-afef-835484cc865a" containerName="extract-content" Nov 28 11:12:30 crc kubenswrapper[4923]: E1128 11:12:30.722717 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ad44a891-fc97-4154-8f93-bbd276c5c18a" containerName="oauth-openshift" Nov 28 11:12:30 crc kubenswrapper[4923]: I1128 11:12:30.722749 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="ad44a891-fc97-4154-8f93-bbd276c5c18a" containerName="oauth-openshift" Nov 28 11:12:30 crc kubenswrapper[4923]: I1128 11:12:30.722914 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="e28cae41-ead3-4395-a457-3077c92068ca" containerName="registry-server" Nov 28 11:12:30 crc kubenswrapper[4923]: I1128 11:12:30.722969 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="ad44a891-fc97-4154-8f93-bbd276c5c18a" containerName="oauth-openshift" Nov 28 11:12:30 crc kubenswrapper[4923]: I1128 11:12:30.722993 4923 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="2db4e8e9-8919-4b13-afef-835484cc865a" containerName="registry-server" Nov 28 11:12:30 crc kubenswrapper[4923]: I1128 11:12:30.723010 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="4fa3aef4-051e-48bb-91a3-bd8bd4c2befb" containerName="registry-server" Nov 28 11:12:30 crc kubenswrapper[4923]: I1128 11:12:30.723029 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="15d2531b-dca2-400b-8a64-63a946c97693" containerName="pruner" Nov 28 11:12:30 crc kubenswrapper[4923]: I1128 11:12:30.723551 4923 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 28 11:12:30 crc kubenswrapper[4923]: I1128 11:12:30.723804 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 11:12:30 crc kubenswrapper[4923]: I1128 11:12:30.724136 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" containerID="cri-o://eece6b2154126c64202c6cb5a8b2953275ed2dc75e76fef6aaf2c4b82a1979f3" gracePeriod=15 Nov 28 11:12:30 crc kubenswrapper[4923]: I1128 11:12:30.724214 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" containerID="cri-o://fdb7df64556e877b9dd56be5e97103abc8aa8b28a43b4a5389d0f6e2489057cf" gracePeriod=15 Nov 28 11:12:30 crc kubenswrapper[4923]: I1128 11:12:30.724211 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" containerID="cri-o://c6f085f1fd5a1ed6abe0727d6a94c95fb1b97a9f00a0dc157f62f68698c25ba9" gracePeriod=15 Nov 28 11:12:30 crc kubenswrapper[4923]: I1128 11:12:30.724295 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" containerID="cri-o://28093276aebb4751d979649c4ced86f500308d0d4dde397771c0e1e968250ec8" gracePeriod=15 Nov 28 11:12:30 crc kubenswrapper[4923]: I1128 11:12:30.724337 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" containerID="cri-o://28ae91e6197ea506c337abdbce14a048856e6bda9b35c5de922904c26bc96a54" gracePeriod=15 Nov 28 11:12:30 crc kubenswrapper[4923]: I1128 11:12:30.730033 4923 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 28 11:12:30 crc kubenswrapper[4923]: E1128 11:12:30.730297 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 28 11:12:30 crc kubenswrapper[4923]: I1128 11:12:30.730316 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 28 11:12:30 crc kubenswrapper[4923]: E1128 11:12:30.730335 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Nov 28 11:12:30 crc 
kubenswrapper[4923]: I1128 11:12:30.730347 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Nov 28 11:12:30 crc kubenswrapper[4923]: E1128 11:12:30.730370 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Nov 28 11:12:30 crc kubenswrapper[4923]: I1128 11:12:30.730382 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Nov 28 11:12:30 crc kubenswrapper[4923]: E1128 11:12:30.730401 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Nov 28 11:12:30 crc kubenswrapper[4923]: I1128 11:12:30.730412 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Nov 28 11:12:30 crc kubenswrapper[4923]: E1128 11:12:30.730433 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Nov 28 11:12:30 crc kubenswrapper[4923]: I1128 11:12:30.730448 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Nov 28 11:12:30 crc kubenswrapper[4923]: E1128 11:12:30.730464 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Nov 28 11:12:30 crc kubenswrapper[4923]: I1128 11:12:30.730476 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Nov 28 11:12:30 crc kubenswrapper[4923]: I1128 11:12:30.730644 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 28 11:12:30 crc kubenswrapper[4923]: I1128 11:12:30.730661 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Nov 28 11:12:30 crc kubenswrapper[4923]: I1128 11:12:30.730675 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 28 11:12:30 crc kubenswrapper[4923]: I1128 11:12:30.730690 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Nov 28 11:12:30 crc kubenswrapper[4923]: I1128 11:12:30.730708 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Nov 28 11:12:30 crc kubenswrapper[4923]: I1128 11:12:30.730723 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Nov 28 11:12:30 crc kubenswrapper[4923]: E1128 11:12:30.730910 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 28 11:12:30 crc kubenswrapper[4923]: I1128 11:12:30.730923 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 28 11:12:30 crc 
kubenswrapper[4923]: I1128 11:12:30.774219 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Nov 28 11:12:30 crc kubenswrapper[4923]: I1128 11:12:30.830446 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 11:12:30 crc kubenswrapper[4923]: I1128 11:12:30.830546 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 11:12:30 crc kubenswrapper[4923]: I1128 11:12:30.830600 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 11:12:30 crc kubenswrapper[4923]: I1128 11:12:30.830781 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 11:12:30 crc kubenswrapper[4923]: I1128 11:12:30.830858 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 11:12:30 crc kubenswrapper[4923]: I1128 11:12:30.830978 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 11:12:30 crc kubenswrapper[4923]: I1128 11:12:30.831027 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 11:12:30 crc kubenswrapper[4923]: I1128 11:12:30.831062 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 11:12:30 crc kubenswrapper[4923]: I1128 11:12:30.931679 4923 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 11:12:30 crc kubenswrapper[4923]: I1128 11:12:30.931766 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 11:12:30 crc kubenswrapper[4923]: I1128 11:12:30.931786 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 11:12:30 crc kubenswrapper[4923]: I1128 11:12:30.931817 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 11:12:30 crc kubenswrapper[4923]: I1128 11:12:30.931844 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 11:12:30 crc kubenswrapper[4923]: I1128 11:12:30.931855 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 11:12:30 crc kubenswrapper[4923]: I1128 11:12:30.931874 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 11:12:30 crc kubenswrapper[4923]: I1128 11:12:30.931925 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 11:12:30 crc kubenswrapper[4923]: I1128 11:12:30.931996 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 11:12:30 crc kubenswrapper[4923]: I1128 11:12:30.931925 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-resource-dir\" (UniqueName: 
\"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 11:12:30 crc kubenswrapper[4923]: I1128 11:12:30.932022 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 11:12:30 crc kubenswrapper[4923]: I1128 11:12:30.932049 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 11:12:30 crc kubenswrapper[4923]: I1128 11:12:30.932063 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 11:12:30 crc kubenswrapper[4923]: I1128 11:12:30.932105 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 11:12:30 crc kubenswrapper[4923]: I1128 11:12:30.932115 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 11:12:30 crc kubenswrapper[4923]: I1128 11:12:30.932136 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 11:12:31 crc kubenswrapper[4923]: I1128 11:12:31.070567 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 11:12:31 crc kubenswrapper[4923]: W1128 11:12:31.096671 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf85e55b1a89d02b0cb034b1ea31ed45a.slice/crio-97f04afe3de99ddb4e969999b34864e527478370c0ee33589106a113b3a88f03 WatchSource:0}: Error finding container 97f04afe3de99ddb4e969999b34864e527478370c0ee33589106a113b3a88f03: Status 404 returned error can't find the container with id 97f04afe3de99ddb4e969999b34864e527478370c0ee33589106a113b3a88f03 Nov 28 11:12:31 crc kubenswrapper[4923]: I1128 11:12:31.119112 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"97f04afe3de99ddb4e969999b34864e527478370c0ee33589106a113b3a88f03"} Nov 28 11:12:31 crc kubenswrapper[4923]: I1128 11:12:31.178215 4923 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="f4b27818a5e8e43d0dc095d08835c792" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" Nov 28 11:12:31 crc kubenswrapper[4923]: I1128 11:12:31.190969 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ad44a891-fc97-4154-8f93-bbd276c5c18a" path="/var/lib/kubelet/pods/ad44a891-fc97-4154-8f93-bbd276c5c18a/volumes" Nov 28 11:12:31 crc kubenswrapper[4923]: I1128 11:12:31.784915 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-qkkh6" Nov 28 11:12:31 crc kubenswrapper[4923]: I1128 11:12:31.785050 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-qkkh6" Nov 28 11:12:31 crc kubenswrapper[4923]: I1128 11:12:31.860060 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-qkkh6" Nov 28 11:12:32 crc kubenswrapper[4923]: I1128 11:12:32.192666 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-qkkh6" Nov 28 11:12:33 crc kubenswrapper[4923]: I1128 11:12:33.148854 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Nov 28 11:12:33 crc kubenswrapper[4923]: I1128 11:12:33.151044 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 28 11:12:33 crc kubenswrapper[4923]: I1128 11:12:33.152205 4923 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="fdb7df64556e877b9dd56be5e97103abc8aa8b28a43b4a5389d0f6e2489057cf" exitCode=0 Nov 28 11:12:33 crc kubenswrapper[4923]: I1128 11:12:33.152245 4923 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="c6f085f1fd5a1ed6abe0727d6a94c95fb1b97a9f00a0dc157f62f68698c25ba9" exitCode=0 Nov 28 11:12:33 crc kubenswrapper[4923]: I1128 11:12:33.152257 4923 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="28093276aebb4751d979649c4ced86f500308d0d4dde397771c0e1e968250ec8" exitCode=0 Nov 28 11:12:33 crc kubenswrapper[4923]: I1128 11:12:33.152268 4923 
generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="28ae91e6197ea506c337abdbce14a048856e6bda9b35c5de922904c26bc96a54" exitCode=2 Nov 28 11:12:33 crc kubenswrapper[4923]: I1128 11:12:33.152353 4923 scope.go:117] "RemoveContainer" containerID="fc06f87c8ea0744810e2b9cb7ff8bb529fc1b2133ab79d12eb8e6129accd3e18" Nov 28 11:12:33 crc kubenswrapper[4923]: I1128 11:12:33.155290 4923 generic.go:334] "Generic (PLEG): container finished" podID="ffd50eaf-eb37-40b2-80b0-a7f71f7371f8" containerID="c9f0deea7ba5b7efd2e37fe1291080f52b70eb99e2ebf922ad287e9e2ea10961" exitCode=0 Nov 28 11:12:33 crc kubenswrapper[4923]: I1128 11:12:33.155404 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"ffd50eaf-eb37-40b2-80b0-a7f71f7371f8","Type":"ContainerDied","Data":"c9f0deea7ba5b7efd2e37fe1291080f52b70eb99e2ebf922ad287e9e2ea10961"} Nov 28 11:12:33 crc kubenswrapper[4923]: I1128 11:12:33.156764 4923 status_manager.go:851] "Failed to get status for pod" podUID="ffd50eaf-eb37-40b2-80b0-a7f71f7371f8" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.196:6443: connect: connection refused" Nov 28 11:12:33 crc kubenswrapper[4923]: I1128 11:12:33.160193 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"38064a9c80b0288cd193292dd4b356e9aa5202fb02dc73e3b199387e46163d03"} Nov 28 11:12:33 crc kubenswrapper[4923]: I1128 11:12:33.160400 4923 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.196:6443: connect: connection refused" Nov 28 11:12:33 crc kubenswrapper[4923]: I1128 11:12:33.160817 4923 status_manager.go:851] "Failed to get status for pod" podUID="ffd50eaf-eb37-40b2-80b0-a7f71f7371f8" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.196:6443: connect: connection refused" Nov 28 11:12:34 crc kubenswrapper[4923]: I1128 11:12:34.169900 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 28 11:12:34 crc kubenswrapper[4923]: I1128 11:12:34.635229 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 28 11:12:34 crc kubenswrapper[4923]: I1128 11:12:34.636587 4923 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.196:6443: connect: connection refused" Nov 28 11:12:34 crc kubenswrapper[4923]: I1128 11:12:34.637162 4923 status_manager.go:851] "Failed to get status for pod" podUID="ffd50eaf-eb37-40b2-80b0-a7f71f7371f8" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.196:6443: connect: connection refused" Nov 28 11:12:34 crc kubenswrapper[4923]: I1128 11:12:34.643902 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 28 11:12:34 crc kubenswrapper[4923]: I1128 11:12:34.645302 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 11:12:34 crc kubenswrapper[4923]: I1128 11:12:34.646001 4923 status_manager.go:851] "Failed to get status for pod" podUID="ffd50eaf-eb37-40b2-80b0-a7f71f7371f8" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.196:6443: connect: connection refused" Nov 28 11:12:34 crc kubenswrapper[4923]: I1128 11:12:34.646387 4923 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.196:6443: connect: connection refused" Nov 28 11:12:34 crc kubenswrapper[4923]: I1128 11:12:34.646768 4923 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.196:6443: connect: connection refused" Nov 28 11:12:34 crc kubenswrapper[4923]: I1128 11:12:34.682400 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Nov 28 11:12:34 crc kubenswrapper[4923]: I1128 11:12:34.682446 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Nov 28 11:12:34 crc kubenswrapper[4923]: I1128 11:12:34.682479 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/ffd50eaf-eb37-40b2-80b0-a7f71f7371f8-kubelet-dir\") pod \"ffd50eaf-eb37-40b2-80b0-a7f71f7371f8\" (UID: \"ffd50eaf-eb37-40b2-80b0-a7f71f7371f8\") " Nov 28 11:12:34 crc kubenswrapper[4923]: I1128 
11:12:34.682565 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ffd50eaf-eb37-40b2-80b0-a7f71f7371f8-kube-api-access\") pod \"ffd50eaf-eb37-40b2-80b0-a7f71f7371f8\" (UID: \"ffd50eaf-eb37-40b2-80b0-a7f71f7371f8\") " Nov 28 11:12:34 crc kubenswrapper[4923]: I1128 11:12:34.682603 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Nov 28 11:12:34 crc kubenswrapper[4923]: I1128 11:12:34.682646 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/ffd50eaf-eb37-40b2-80b0-a7f71f7371f8-var-lock\") pod \"ffd50eaf-eb37-40b2-80b0-a7f71f7371f8\" (UID: \"ffd50eaf-eb37-40b2-80b0-a7f71f7371f8\") " Nov 28 11:12:34 crc kubenswrapper[4923]: I1128 11:12:34.682986 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ffd50eaf-eb37-40b2-80b0-a7f71f7371f8-var-lock" (OuterVolumeSpecName: "var-lock") pod "ffd50eaf-eb37-40b2-80b0-a7f71f7371f8" (UID: "ffd50eaf-eb37-40b2-80b0-a7f71f7371f8"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 11:12:34 crc kubenswrapper[4923]: I1128 11:12:34.683036 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir" (OuterVolumeSpecName: "cert-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "cert-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 11:12:34 crc kubenswrapper[4923]: I1128 11:12:34.683063 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 11:12:34 crc kubenswrapper[4923]: I1128 11:12:34.683091 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ffd50eaf-eb37-40b2-80b0-a7f71f7371f8-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "ffd50eaf-eb37-40b2-80b0-a7f71f7371f8" (UID: "ffd50eaf-eb37-40b2-80b0-a7f71f7371f8"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 11:12:34 crc kubenswrapper[4923]: I1128 11:12:34.684055 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 11:12:34 crc kubenswrapper[4923]: I1128 11:12:34.691607 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ffd50eaf-eb37-40b2-80b0-a7f71f7371f8-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "ffd50eaf-eb37-40b2-80b0-a7f71f7371f8" (UID: "ffd50eaf-eb37-40b2-80b0-a7f71f7371f8"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:12:34 crc kubenswrapper[4923]: I1128 11:12:34.784325 4923 reconciler_common.go:293] "Volume detached for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") on node \"crc\" DevicePath \"\"" Nov 28 11:12:34 crc kubenswrapper[4923]: I1128 11:12:34.784372 4923 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") on node \"crc\" DevicePath \"\"" Nov 28 11:12:34 crc kubenswrapper[4923]: I1128 11:12:34.784391 4923 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/ffd50eaf-eb37-40b2-80b0-a7f71f7371f8-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 28 11:12:34 crc kubenswrapper[4923]: I1128 11:12:34.784408 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ffd50eaf-eb37-40b2-80b0-a7f71f7371f8-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 28 11:12:34 crc kubenswrapper[4923]: I1128 11:12:34.784428 4923 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") on node \"crc\" DevicePath \"\"" Nov 28 11:12:34 crc kubenswrapper[4923]: I1128 11:12:34.784445 4923 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/ffd50eaf-eb37-40b2-80b0-a7f71f7371f8-var-lock\") on node \"crc\" DevicePath \"\"" Nov 28 11:12:35 crc kubenswrapper[4923]: I1128 11:12:35.180489 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 28 11:12:35 crc kubenswrapper[4923]: I1128 11:12:35.181550 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4b27818a5e8e43d0dc095d08835c792" path="/var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/volumes" Nov 28 11:12:35 crc kubenswrapper[4923]: I1128 11:12:35.186064 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"ffd50eaf-eb37-40b2-80b0-a7f71f7371f8","Type":"ContainerDied","Data":"a09f4bcb60f6eac6a172225cb1a72c3b636a1e72bdeebf8de459f2919c30a331"} Nov 28 11:12:35 crc kubenswrapper[4923]: I1128 11:12:35.186127 4923 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a09f4bcb60f6eac6a172225cb1a72c3b636a1e72bdeebf8de459f2919c30a331" Nov 28 11:12:35 crc kubenswrapper[4923]: I1128 11:12:35.188658 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 28 11:12:35 crc kubenswrapper[4923]: I1128 11:12:35.189709 4923 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="eece6b2154126c64202c6cb5a8b2953275ed2dc75e76fef6aaf2c4b82a1979f3" exitCode=0 Nov 28 11:12:35 crc kubenswrapper[4923]: I1128 11:12:35.189775 4923 scope.go:117] "RemoveContainer" containerID="fdb7df64556e877b9dd56be5e97103abc8aa8b28a43b4a5389d0f6e2489057cf" Nov 28 11:12:35 crc kubenswrapper[4923]: I1128 11:12:35.189834 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 11:12:35 crc kubenswrapper[4923]: I1128 11:12:35.191334 4923 status_manager.go:851] "Failed to get status for pod" podUID="ffd50eaf-eb37-40b2-80b0-a7f71f7371f8" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.196:6443: connect: connection refused" Nov 28 11:12:35 crc kubenswrapper[4923]: I1128 11:12:35.192466 4923 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.196:6443: connect: connection refused" Nov 28 11:12:35 crc kubenswrapper[4923]: I1128 11:12:35.193108 4923 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.196:6443: connect: connection refused" Nov 28 11:12:35 crc kubenswrapper[4923]: I1128 11:12:35.220218 4923 scope.go:117] "RemoveContainer" containerID="c6f085f1fd5a1ed6abe0727d6a94c95fb1b97a9f00a0dc157f62f68698c25ba9" Nov 28 11:12:35 crc kubenswrapper[4923]: I1128 11:12:35.220779 4923 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.196:6443: connect: connection refused" Nov 28 11:12:35 crc kubenswrapper[4923]: I1128 11:12:35.221519 4923 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.196:6443: connect: connection refused" Nov 28 11:12:35 crc kubenswrapper[4923]: I1128 11:12:35.222849 4923 status_manager.go:851] "Failed to get status for pod" podUID="ffd50eaf-eb37-40b2-80b0-a7f71f7371f8" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.196:6443: connect: connection refused" Nov 28 11:12:35 crc kubenswrapper[4923]: I1128 11:12:35.230165 4923 status_manager.go:851] "Failed to get status for pod" podUID="ffd50eaf-eb37-40b2-80b0-a7f71f7371f8" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.196:6443: connect: connection refused" Nov 28 11:12:35 crc kubenswrapper[4923]: I1128 11:12:35.230980 4923 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.196:6443: connect: connection refused" Nov 28 11:12:35 crc kubenswrapper[4923]: I1128 11:12:35.231462 4923 status_manager.go:851] "Failed to get status for pod" 
podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.196:6443: connect: connection refused" Nov 28 11:12:35 crc kubenswrapper[4923]: I1128 11:12:35.245910 4923 scope.go:117] "RemoveContainer" containerID="28093276aebb4751d979649c4ced86f500308d0d4dde397771c0e1e968250ec8" Nov 28 11:12:35 crc kubenswrapper[4923]: I1128 11:12:35.267989 4923 scope.go:117] "RemoveContainer" containerID="28ae91e6197ea506c337abdbce14a048856e6bda9b35c5de922904c26bc96a54" Nov 28 11:12:35 crc kubenswrapper[4923]: I1128 11:12:35.292328 4923 scope.go:117] "RemoveContainer" containerID="eece6b2154126c64202c6cb5a8b2953275ed2dc75e76fef6aaf2c4b82a1979f3" Nov 28 11:12:35 crc kubenswrapper[4923]: I1128 11:12:35.318249 4923 scope.go:117] "RemoveContainer" containerID="a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005" Nov 28 11:12:35 crc kubenswrapper[4923]: I1128 11:12:35.352071 4923 scope.go:117] "RemoveContainer" containerID="fdb7df64556e877b9dd56be5e97103abc8aa8b28a43b4a5389d0f6e2489057cf" Nov 28 11:12:35 crc kubenswrapper[4923]: E1128 11:12:35.353037 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fdb7df64556e877b9dd56be5e97103abc8aa8b28a43b4a5389d0f6e2489057cf\": container with ID starting with fdb7df64556e877b9dd56be5e97103abc8aa8b28a43b4a5389d0f6e2489057cf not found: ID does not exist" containerID="fdb7df64556e877b9dd56be5e97103abc8aa8b28a43b4a5389d0f6e2489057cf" Nov 28 11:12:35 crc kubenswrapper[4923]: I1128 11:12:35.353346 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fdb7df64556e877b9dd56be5e97103abc8aa8b28a43b4a5389d0f6e2489057cf"} err="failed to get container status \"fdb7df64556e877b9dd56be5e97103abc8aa8b28a43b4a5389d0f6e2489057cf\": rpc error: code = NotFound desc = could not find container \"fdb7df64556e877b9dd56be5e97103abc8aa8b28a43b4a5389d0f6e2489057cf\": container with ID starting with fdb7df64556e877b9dd56be5e97103abc8aa8b28a43b4a5389d0f6e2489057cf not found: ID does not exist" Nov 28 11:12:35 crc kubenswrapper[4923]: I1128 11:12:35.353726 4923 scope.go:117] "RemoveContainer" containerID="c6f085f1fd5a1ed6abe0727d6a94c95fb1b97a9f00a0dc157f62f68698c25ba9" Nov 28 11:12:35 crc kubenswrapper[4923]: E1128 11:12:35.354738 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c6f085f1fd5a1ed6abe0727d6a94c95fb1b97a9f00a0dc157f62f68698c25ba9\": container with ID starting with c6f085f1fd5a1ed6abe0727d6a94c95fb1b97a9f00a0dc157f62f68698c25ba9 not found: ID does not exist" containerID="c6f085f1fd5a1ed6abe0727d6a94c95fb1b97a9f00a0dc157f62f68698c25ba9" Nov 28 11:12:35 crc kubenswrapper[4923]: I1128 11:12:35.354803 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c6f085f1fd5a1ed6abe0727d6a94c95fb1b97a9f00a0dc157f62f68698c25ba9"} err="failed to get container status \"c6f085f1fd5a1ed6abe0727d6a94c95fb1b97a9f00a0dc157f62f68698c25ba9\": rpc error: code = NotFound desc = could not find container \"c6f085f1fd5a1ed6abe0727d6a94c95fb1b97a9f00a0dc157f62f68698c25ba9\": container with ID starting with c6f085f1fd5a1ed6abe0727d6a94c95fb1b97a9f00a0dc157f62f68698c25ba9 not found: ID does not exist" Nov 28 11:12:35 crc kubenswrapper[4923]: I1128 11:12:35.354848 4923 scope.go:117] "RemoveContainer" 
containerID="28093276aebb4751d979649c4ced86f500308d0d4dde397771c0e1e968250ec8" Nov 28 11:12:35 crc kubenswrapper[4923]: E1128 11:12:35.356544 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"28093276aebb4751d979649c4ced86f500308d0d4dde397771c0e1e968250ec8\": container with ID starting with 28093276aebb4751d979649c4ced86f500308d0d4dde397771c0e1e968250ec8 not found: ID does not exist" containerID="28093276aebb4751d979649c4ced86f500308d0d4dde397771c0e1e968250ec8" Nov 28 11:12:35 crc kubenswrapper[4923]: I1128 11:12:35.356586 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"28093276aebb4751d979649c4ced86f500308d0d4dde397771c0e1e968250ec8"} err="failed to get container status \"28093276aebb4751d979649c4ced86f500308d0d4dde397771c0e1e968250ec8\": rpc error: code = NotFound desc = could not find container \"28093276aebb4751d979649c4ced86f500308d0d4dde397771c0e1e968250ec8\": container with ID starting with 28093276aebb4751d979649c4ced86f500308d0d4dde397771c0e1e968250ec8 not found: ID does not exist" Nov 28 11:12:35 crc kubenswrapper[4923]: I1128 11:12:35.356655 4923 scope.go:117] "RemoveContainer" containerID="28ae91e6197ea506c337abdbce14a048856e6bda9b35c5de922904c26bc96a54" Nov 28 11:12:35 crc kubenswrapper[4923]: E1128 11:12:35.357236 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"28ae91e6197ea506c337abdbce14a048856e6bda9b35c5de922904c26bc96a54\": container with ID starting with 28ae91e6197ea506c337abdbce14a048856e6bda9b35c5de922904c26bc96a54 not found: ID does not exist" containerID="28ae91e6197ea506c337abdbce14a048856e6bda9b35c5de922904c26bc96a54" Nov 28 11:12:35 crc kubenswrapper[4923]: I1128 11:12:35.357429 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"28ae91e6197ea506c337abdbce14a048856e6bda9b35c5de922904c26bc96a54"} err="failed to get container status \"28ae91e6197ea506c337abdbce14a048856e6bda9b35c5de922904c26bc96a54\": rpc error: code = NotFound desc = could not find container \"28ae91e6197ea506c337abdbce14a048856e6bda9b35c5de922904c26bc96a54\": container with ID starting with 28ae91e6197ea506c337abdbce14a048856e6bda9b35c5de922904c26bc96a54 not found: ID does not exist" Nov 28 11:12:35 crc kubenswrapper[4923]: I1128 11:12:35.357577 4923 scope.go:117] "RemoveContainer" containerID="eece6b2154126c64202c6cb5a8b2953275ed2dc75e76fef6aaf2c4b82a1979f3" Nov 28 11:12:35 crc kubenswrapper[4923]: E1128 11:12:35.358152 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"eece6b2154126c64202c6cb5a8b2953275ed2dc75e76fef6aaf2c4b82a1979f3\": container with ID starting with eece6b2154126c64202c6cb5a8b2953275ed2dc75e76fef6aaf2c4b82a1979f3 not found: ID does not exist" containerID="eece6b2154126c64202c6cb5a8b2953275ed2dc75e76fef6aaf2c4b82a1979f3" Nov 28 11:12:35 crc kubenswrapper[4923]: I1128 11:12:35.358196 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eece6b2154126c64202c6cb5a8b2953275ed2dc75e76fef6aaf2c4b82a1979f3"} err="failed to get container status \"eece6b2154126c64202c6cb5a8b2953275ed2dc75e76fef6aaf2c4b82a1979f3\": rpc error: code = NotFound desc = could not find container \"eece6b2154126c64202c6cb5a8b2953275ed2dc75e76fef6aaf2c4b82a1979f3\": container with ID starting with 
eece6b2154126c64202c6cb5a8b2953275ed2dc75e76fef6aaf2c4b82a1979f3 not found: ID does not exist" Nov 28 11:12:35 crc kubenswrapper[4923]: I1128 11:12:35.358229 4923 scope.go:117] "RemoveContainer" containerID="a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005" Nov 28 11:12:35 crc kubenswrapper[4923]: E1128 11:12:35.358590 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005\": container with ID starting with a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005 not found: ID does not exist" containerID="a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005" Nov 28 11:12:35 crc kubenswrapper[4923]: I1128 11:12:35.358627 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005"} err="failed to get container status \"a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005\": rpc error: code = NotFound desc = could not find container \"a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005\": container with ID starting with a626cce284eaf6364d8bf3e7f4496f355e73b35810f320718f26e4639455b005 not found: ID does not exist" Nov 28 11:12:36 crc kubenswrapper[4923]: E1128 11:12:36.763287 4923 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.196:6443: connect: connection refused" Nov 28 11:12:36 crc kubenswrapper[4923]: E1128 11:12:36.764125 4923 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.196:6443: connect: connection refused" Nov 28 11:12:36 crc kubenswrapper[4923]: E1128 11:12:36.764351 4923 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.196:6443: connect: connection refused" Nov 28 11:12:36 crc kubenswrapper[4923]: E1128 11:12:36.764529 4923 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.196:6443: connect: connection refused" Nov 28 11:12:36 crc kubenswrapper[4923]: E1128 11:12:36.764743 4923 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.196:6443: connect: connection refused" Nov 28 11:12:36 crc kubenswrapper[4923]: I1128 11:12:36.764764 4923 controller.go:115] "failed to update lease using latest lease, fallback to ensure lease" err="failed 5 attempts to update lease" Nov 28 11:12:36 crc kubenswrapper[4923]: E1128 11:12:36.764956 4923 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.196:6443: connect: connection refused" interval="200ms" Nov 28 11:12:36 crc kubenswrapper[4923]: E1128 11:12:36.965873 4923 controller.go:145] "Failed to ensure lease exists, will retry" err="Get 
\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.196:6443: connect: connection refused" interval="400ms" Nov 28 11:12:37 crc kubenswrapper[4923]: E1128 11:12:37.259685 4923 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.196:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.187c2749df70a820 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Created,Message:Created container startup-monitor,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-28 11:12:32.25718992 +0000 UTC m=+231.385874170,LastTimestamp:2025-11-28 11:12:32.25718992 +0000 UTC m=+231.385874170,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 28 11:12:37 crc kubenswrapper[4923]: E1128 11:12:37.366663 4923 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.196:6443: connect: connection refused" interval="800ms" Nov 28 11:12:38 crc kubenswrapper[4923]: E1128 11:12:38.168139 4923 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.196:6443: connect: connection refused" interval="1.6s" Nov 28 11:12:38 crc kubenswrapper[4923]: I1128 11:12:38.679889 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-l4xf8" Nov 28 11:12:38 crc kubenswrapper[4923]: I1128 11:12:38.680429 4923 status_manager.go:851] "Failed to get status for pod" podUID="84ce6a6f-d3d1-4ef9-8ca5-79dfa714a2b4" pod="openshift-marketplace/certified-operators-l4xf8" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-l4xf8\": dial tcp 38.102.83.196:6443: connect: connection refused" Nov 28 11:12:38 crc kubenswrapper[4923]: I1128 11:12:38.680875 4923 status_manager.go:851] "Failed to get status for pod" podUID="ffd50eaf-eb37-40b2-80b0-a7f71f7371f8" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.196:6443: connect: connection refused" Nov 28 11:12:38 crc kubenswrapper[4923]: I1128 11:12:38.682142 4923 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.196:6443: connect: connection refused" Nov 28 11:12:38 crc kubenswrapper[4923]: I1128 11:12:38.854515 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-js2pf" Nov 28 11:12:38 crc kubenswrapper[4923]: I1128 
11:12:38.855178 4923 status_manager.go:851] "Failed to get status for pod" podUID="f54a8053-81cc-429f-b68e-87a3fd245263" pod="openshift-marketplace/certified-operators-js2pf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-js2pf\": dial tcp 38.102.83.196:6443: connect: connection refused" Nov 28 11:12:38 crc kubenswrapper[4923]: I1128 11:12:38.855608 4923 status_manager.go:851] "Failed to get status for pod" podUID="84ce6a6f-d3d1-4ef9-8ca5-79dfa714a2b4" pod="openshift-marketplace/certified-operators-l4xf8" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-l4xf8\": dial tcp 38.102.83.196:6443: connect: connection refused" Nov 28 11:12:38 crc kubenswrapper[4923]: I1128 11:12:38.855860 4923 status_manager.go:851] "Failed to get status for pod" podUID="ffd50eaf-eb37-40b2-80b0-a7f71f7371f8" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.196:6443: connect: connection refused" Nov 28 11:12:38 crc kubenswrapper[4923]: I1128 11:12:38.856196 4923 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.196:6443: connect: connection refused" Nov 28 11:12:39 crc kubenswrapper[4923]: E1128 11:12:39.769236 4923 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.196:6443: connect: connection refused" interval="3.2s" Nov 28 11:12:41 crc kubenswrapper[4923]: I1128 11:12:41.181331 4923 status_manager.go:851] "Failed to get status for pod" podUID="f54a8053-81cc-429f-b68e-87a3fd245263" pod="openshift-marketplace/certified-operators-js2pf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-js2pf\": dial tcp 38.102.83.196:6443: connect: connection refused" Nov 28 11:12:41 crc kubenswrapper[4923]: I1128 11:12:41.183201 4923 status_manager.go:851] "Failed to get status for pod" podUID="84ce6a6f-d3d1-4ef9-8ca5-79dfa714a2b4" pod="openshift-marketplace/certified-operators-l4xf8" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-l4xf8\": dial tcp 38.102.83.196:6443: connect: connection refused" Nov 28 11:12:41 crc kubenswrapper[4923]: I1128 11:12:41.184312 4923 status_manager.go:851] "Failed to get status for pod" podUID="ffd50eaf-eb37-40b2-80b0-a7f71f7371f8" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.196:6443: connect: connection refused" Nov 28 11:12:41 crc kubenswrapper[4923]: I1128 11:12:41.184894 4923 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.196:6443: connect: connection refused" Nov 28 11:12:42 crc kubenswrapper[4923]: E1128 11:12:42.970901 4923 
controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.196:6443: connect: connection refused" interval="6.4s" Nov 28 11:12:44 crc kubenswrapper[4923]: I1128 11:12:44.168335 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 11:12:44 crc kubenswrapper[4923]: I1128 11:12:44.170193 4923 status_manager.go:851] "Failed to get status for pod" podUID="f54a8053-81cc-429f-b68e-87a3fd245263" pod="openshift-marketplace/certified-operators-js2pf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-js2pf\": dial tcp 38.102.83.196:6443: connect: connection refused" Nov 28 11:12:44 crc kubenswrapper[4923]: I1128 11:12:44.170630 4923 status_manager.go:851] "Failed to get status for pod" podUID="84ce6a6f-d3d1-4ef9-8ca5-79dfa714a2b4" pod="openshift-marketplace/certified-operators-l4xf8" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-l4xf8\": dial tcp 38.102.83.196:6443: connect: connection refused" Nov 28 11:12:44 crc kubenswrapper[4923]: I1128 11:12:44.171159 4923 status_manager.go:851] "Failed to get status for pod" podUID="ffd50eaf-eb37-40b2-80b0-a7f71f7371f8" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.196:6443: connect: connection refused" Nov 28 11:12:44 crc kubenswrapper[4923]: I1128 11:12:44.171701 4923 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.196:6443: connect: connection refused" Nov 28 11:12:44 crc kubenswrapper[4923]: I1128 11:12:44.195767 4923 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="2c83fada-ddb5-4acd-99c4-74d9f42e6250" Nov 28 11:12:44 crc kubenswrapper[4923]: I1128 11:12:44.195815 4923 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="2c83fada-ddb5-4acd-99c4-74d9f42e6250" Nov 28 11:12:44 crc kubenswrapper[4923]: E1128 11:12:44.196421 4923 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.196:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 11:12:44 crc kubenswrapper[4923]: I1128 11:12:44.197071 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 11:12:44 crc kubenswrapper[4923]: W1128 11:12:44.231060 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod71bb4a3aecc4ba5b26c4b7318770ce13.slice/crio-06835c3f19bc7cdafb8bf1a3cbee52b25afd4c7300b0c6012c19f6e7a3c68189 WatchSource:0}: Error finding container 06835c3f19bc7cdafb8bf1a3cbee52b25afd4c7300b0c6012c19f6e7a3c68189: Status 404 returned error can't find the container with id 06835c3f19bc7cdafb8bf1a3cbee52b25afd4c7300b0c6012c19f6e7a3c68189 Nov 28 11:12:44 crc kubenswrapper[4923]: I1128 11:12:44.247598 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"06835c3f19bc7cdafb8bf1a3cbee52b25afd4c7300b0c6012c19f6e7a3c68189"} Nov 28 11:12:44 crc kubenswrapper[4923]: I1128 11:12:44.798572 4923 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Liveness probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body= Nov 28 11:12:44 crc kubenswrapper[4923]: I1128 11:12:44.798649 4923 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" Nov 28 11:12:45 crc kubenswrapper[4923]: I1128 11:12:45.260275 4923 generic.go:334] "Generic (PLEG): container finished" podID="71bb4a3aecc4ba5b26c4b7318770ce13" containerID="7ae4e5d898e84f37b33ff2195ad9487ae788737a40799f07cff9b61a76568c84" exitCode=0 Nov 28 11:12:45 crc kubenswrapper[4923]: I1128 11:12:45.261724 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerDied","Data":"7ae4e5d898e84f37b33ff2195ad9487ae788737a40799f07cff9b61a76568c84"} Nov 28 11:12:45 crc kubenswrapper[4923]: I1128 11:12:45.262197 4923 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="2c83fada-ddb5-4acd-99c4-74d9f42e6250" Nov 28 11:12:45 crc kubenswrapper[4923]: I1128 11:12:45.262263 4923 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="2c83fada-ddb5-4acd-99c4-74d9f42e6250" Nov 28 11:12:45 crc kubenswrapper[4923]: E1128 11:12:45.263131 4923 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.196:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 11:12:45 crc kubenswrapper[4923]: I1128 11:12:45.263333 4923 status_manager.go:851] "Failed to get status for pod" podUID="f54a8053-81cc-429f-b68e-87a3fd245263" pod="openshift-marketplace/certified-operators-js2pf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-js2pf\": dial tcp 38.102.83.196:6443: connect: connection refused" Nov 28 11:12:45 crc kubenswrapper[4923]: I1128 11:12:45.263928 4923 status_manager.go:851] "Failed to get status for pod" 
podUID="84ce6a6f-d3d1-4ef9-8ca5-79dfa714a2b4" pod="openshift-marketplace/certified-operators-l4xf8" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-l4xf8\": dial tcp 38.102.83.196:6443: connect: connection refused" Nov 28 11:12:45 crc kubenswrapper[4923]: I1128 11:12:45.264791 4923 status_manager.go:851] "Failed to get status for pod" podUID="ffd50eaf-eb37-40b2-80b0-a7f71f7371f8" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.196:6443: connect: connection refused" Nov 28 11:12:45 crc kubenswrapper[4923]: I1128 11:12:45.265867 4923 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.196:6443: connect: connection refused" Nov 28 11:12:45 crc kubenswrapper[4923]: I1128 11:12:45.268562 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Nov 28 11:12:45 crc kubenswrapper[4923]: I1128 11:12:45.268714 4923 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="8f83e92b35264fccdd516d857e5a574a7156f7615b643691b6f8694daa38089b" exitCode=1 Nov 28 11:12:45 crc kubenswrapper[4923]: I1128 11:12:45.268760 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"8f83e92b35264fccdd516d857e5a574a7156f7615b643691b6f8694daa38089b"} Nov 28 11:12:45 crc kubenswrapper[4923]: I1128 11:12:45.269453 4923 scope.go:117] "RemoveContainer" containerID="8f83e92b35264fccdd516d857e5a574a7156f7615b643691b6f8694daa38089b" Nov 28 11:12:45 crc kubenswrapper[4923]: I1128 11:12:45.269994 4923 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.196:6443: connect: connection refused" Nov 28 11:12:45 crc kubenswrapper[4923]: I1128 11:12:45.270723 4923 status_manager.go:851] "Failed to get status for pod" podUID="f54a8053-81cc-429f-b68e-87a3fd245263" pod="openshift-marketplace/certified-operators-js2pf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-js2pf\": dial tcp 38.102.83.196:6443: connect: connection refused" Nov 28 11:12:45 crc kubenswrapper[4923]: I1128 11:12:45.271618 4923 status_manager.go:851] "Failed to get status for pod" podUID="84ce6a6f-d3d1-4ef9-8ca5-79dfa714a2b4" pod="openshift-marketplace/certified-operators-l4xf8" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-l4xf8\": dial tcp 38.102.83.196:6443: connect: connection refused" Nov 28 11:12:45 crc kubenswrapper[4923]: I1128 11:12:45.272167 4923 status_manager.go:851] "Failed to get status for pod" podUID="ffd50eaf-eb37-40b2-80b0-a7f71f7371f8" pod="openshift-kube-apiserver/installer-9-crc" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.196:6443: connect: connection refused" Nov 28 11:12:45 crc kubenswrapper[4923]: I1128 11:12:45.273015 4923 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.196:6443: connect: connection refused" Nov 28 11:12:45 crc kubenswrapper[4923]: E1128 11:12:45.624717 4923 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.196:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.187c2749df70a820 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Created,Message:Created container startup-monitor,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-28 11:12:32.25718992 +0000 UTC m=+231.385874170,LastTimestamp:2025-11-28 11:12:32.25718992 +0000 UTC m=+231.385874170,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 28 11:12:46 crc kubenswrapper[4923]: I1128 11:12:46.275637 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Nov 28 11:12:46 crc kubenswrapper[4923]: I1128 11:12:46.275690 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"d9cdd6dc9f29176a16c0a127443a234482eeef30d52df266631ef62a608da88d"} Nov 28 11:12:46 crc kubenswrapper[4923]: I1128 11:12:46.282277 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"3b36db5c9113f2302c798b121ada780cf8ddc31f13988c4134a5f749eb721a92"} Nov 28 11:12:46 crc kubenswrapper[4923]: I1128 11:12:46.282314 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"a07ca35a328df2fcf6e1fda619d37c00203e104265963b482f78fdde131cbff3"} Nov 28 11:12:46 crc kubenswrapper[4923]: I1128 11:12:46.282324 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"325f0ed2a402b4a5271aa2744ac2677af15dcc51710d25e43babeb439a611d3b"} Nov 28 11:12:47 crc kubenswrapper[4923]: I1128 11:12:47.289097 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"7c88e5b2c541bee3ca6042b32b6592b52a0e659c6af3f9319d8551c3f070b9db"} Nov 28 11:12:47 crc kubenswrapper[4923]: I1128 
11:12:47.289376 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 11:12:47 crc kubenswrapper[4923]: I1128 11:12:47.289389 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"341c9363e36aa7dd842eac376f085976859c732861a55faac5348a63d9280264"} Nov 28 11:12:47 crc kubenswrapper[4923]: I1128 11:12:47.289339 4923 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="2c83fada-ddb5-4acd-99c4-74d9f42e6250" Nov 28 11:12:47 crc kubenswrapper[4923]: I1128 11:12:47.289404 4923 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="2c83fada-ddb5-4acd-99c4-74d9f42e6250" Nov 28 11:12:49 crc kubenswrapper[4923]: I1128 11:12:49.197293 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 11:12:49 crc kubenswrapper[4923]: I1128 11:12:49.197363 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 11:12:49 crc kubenswrapper[4923]: I1128 11:12:49.207912 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 11:12:49 crc kubenswrapper[4923]: I1128 11:12:49.478882 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 11:12:49 crc kubenswrapper[4923]: I1128 11:12:49.486014 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 11:12:50 crc kubenswrapper[4923]: I1128 11:12:50.307305 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 11:12:52 crc kubenswrapper[4923]: I1128 11:12:52.297915 4923 kubelet.go:1914] "Deleted mirror pod because it is outdated" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 11:12:52 crc kubenswrapper[4923]: I1128 11:12:52.323230 4923 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="2c83fada-ddb5-4acd-99c4-74d9f42e6250" Nov 28 11:12:52 crc kubenswrapper[4923]: I1128 11:12:52.323253 4923 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="2c83fada-ddb5-4acd-99c4-74d9f42e6250" Nov 28 11:12:52 crc kubenswrapper[4923]: I1128 11:12:52.326453 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 11:12:52 crc kubenswrapper[4923]: I1128 11:12:52.328321 4923 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="d53ff23f-75d6-4476-8bd8-14bc5bc55bf5" Nov 28 11:12:53 crc kubenswrapper[4923]: I1128 11:12:53.327852 4923 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="2c83fada-ddb5-4acd-99c4-74d9f42e6250" Nov 28 11:12:53 crc kubenswrapper[4923]: I1128 11:12:53.327883 4923 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="2c83fada-ddb5-4acd-99c4-74d9f42e6250" Nov 28 11:13:01 crc 
kubenswrapper[4923]: I1128 11:13:01.198783 4923 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="d53ff23f-75d6-4476-8bd8-14bc5bc55bf5" Nov 28 11:13:02 crc kubenswrapper[4923]: I1128 11:13:02.028173 4923 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Nov 28 11:13:02 crc kubenswrapper[4923]: I1128 11:13:02.238648 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Nov 28 11:13:02 crc kubenswrapper[4923]: I1128 11:13:02.626182 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Nov 28 11:13:02 crc kubenswrapper[4923]: I1128 11:13:02.844527 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Nov 28 11:13:02 crc kubenswrapper[4923]: I1128 11:13:02.955260 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 28 11:13:03 crc kubenswrapper[4923]: I1128 11:13:03.091052 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 11:13:03 crc kubenswrapper[4923]: I1128 11:13:03.297165 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Nov 28 11:13:03 crc kubenswrapper[4923]: I1128 11:13:03.406375 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 28 11:13:03 crc kubenswrapper[4923]: I1128 11:13:03.482837 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Nov 28 11:13:03 crc kubenswrapper[4923]: I1128 11:13:03.954020 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Nov 28 11:13:03 crc kubenswrapper[4923]: I1128 11:13:03.999467 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Nov 28 11:13:04 crc kubenswrapper[4923]: I1128 11:13:04.183572 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Nov 28 11:13:04 crc kubenswrapper[4923]: I1128 11:13:04.214927 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 28 11:13:04 crc kubenswrapper[4923]: I1128 11:13:04.355184 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Nov 28 11:13:04 crc kubenswrapper[4923]: I1128 11:13:04.426822 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Nov 28 11:13:04 crc kubenswrapper[4923]: I1128 11:13:04.585572 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Nov 28 11:13:04 crc kubenswrapper[4923]: I1128 11:13:04.686840 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Nov 28 11:13:04 crc kubenswrapper[4923]: 
I1128 11:13:04.798689 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Nov 28 11:13:04 crc kubenswrapper[4923]: I1128 11:13:04.924375 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Nov 28 11:13:05 crc kubenswrapper[4923]: I1128 11:13:05.195664 4923 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Nov 28 11:13:05 crc kubenswrapper[4923]: I1128 11:13:05.203645 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podStartSLOduration=35.203620706 podStartE2EDuration="35.203620706s" podCreationTimestamp="2025-11-28 11:12:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:12:52.18861685 +0000 UTC m=+251.317301060" watchObservedRunningTime="2025-11-28 11:13:05.203620706 +0000 UTC m=+264.332304946" Nov 28 11:13:05 crc kubenswrapper[4923]: I1128 11:13:05.204295 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 28 11:13:05 crc kubenswrapper[4923]: I1128 11:13:05.204359 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc","openshift-authentication/oauth-openshift-65c4c84884-p8ltw"] Nov 28 11:13:05 crc kubenswrapper[4923]: E1128 11:13:05.204652 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ffd50eaf-eb37-40b2-80b0-a7f71f7371f8" containerName="installer" Nov 28 11:13:05 crc kubenswrapper[4923]: I1128 11:13:05.204680 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="ffd50eaf-eb37-40b2-80b0-a7f71f7371f8" containerName="installer" Nov 28 11:13:05 crc kubenswrapper[4923]: I1128 11:13:05.204859 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="ffd50eaf-eb37-40b2-80b0-a7f71f7371f8" containerName="installer" Nov 28 11:13:05 crc kubenswrapper[4923]: I1128 11:13:05.204872 4923 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="2c83fada-ddb5-4acd-99c4-74d9f42e6250" Nov 28 11:13:05 crc kubenswrapper[4923]: I1128 11:13:05.204910 4923 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="2c83fada-ddb5-4acd-99c4-74d9f42e6250" Nov 28 11:13:05 crc kubenswrapper[4923]: I1128 11:13:05.205572 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-65c4c84884-p8ltw" Nov 28 11:13:05 crc kubenswrapper[4923]: I1128 11:13:05.210903 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Nov 28 11:13:05 crc kubenswrapper[4923]: I1128 11:13:05.211257 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Nov 28 11:13:05 crc kubenswrapper[4923]: I1128 11:13:05.214374 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Nov 28 11:13:05 crc kubenswrapper[4923]: I1128 11:13:05.214730 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Nov 28 11:13:05 crc kubenswrapper[4923]: I1128 11:13:05.214762 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Nov 28 11:13:05 crc kubenswrapper[4923]: I1128 11:13:05.215098 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Nov 28 11:13:05 crc kubenswrapper[4923]: I1128 11:13:05.215630 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Nov 28 11:13:05 crc kubenswrapper[4923]: I1128 11:13:05.215642 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Nov 28 11:13:05 crc kubenswrapper[4923]: I1128 11:13:05.215679 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Nov 28 11:13:05 crc kubenswrapper[4923]: I1128 11:13:05.216584 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 11:13:05 crc kubenswrapper[4923]: I1128 11:13:05.216923 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Nov 28 11:13:05 crc kubenswrapper[4923]: I1128 11:13:05.217067 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Nov 28 11:13:05 crc kubenswrapper[4923]: I1128 11:13:05.217328 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Nov 28 11:13:05 crc kubenswrapper[4923]: I1128 11:13:05.225558 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Nov 28 11:13:05 crc kubenswrapper[4923]: I1128 11:13:05.234062 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Nov 28 11:13:05 crc kubenswrapper[4923]: I1128 11:13:05.234323 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Nov 28 11:13:05 crc kubenswrapper[4923]: I1128 11:13:05.260912 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=13.260892242 podStartE2EDuration="13.260892242s" podCreationTimestamp="2025-11-28 11:12:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" 
observedRunningTime="2025-11-28 11:13:05.257498245 +0000 UTC m=+264.386182495" watchObservedRunningTime="2025-11-28 11:13:05.260892242 +0000 UTC m=+264.389576452" Nov 28 11:13:05 crc kubenswrapper[4923]: I1128 11:13:05.308376 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/b9d0f7a6-d281-474e-b196-9208ad74cba4-audit-dir\") pod \"oauth-openshift-65c4c84884-p8ltw\" (UID: \"b9d0f7a6-d281-474e-b196-9208ad74cba4\") " pod="openshift-authentication/oauth-openshift-65c4c84884-p8ltw" Nov 28 11:13:05 crc kubenswrapper[4923]: I1128 11:13:05.308432 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/b9d0f7a6-d281-474e-b196-9208ad74cba4-v4-0-config-system-cliconfig\") pod \"oauth-openshift-65c4c84884-p8ltw\" (UID: \"b9d0f7a6-d281-474e-b196-9208ad74cba4\") " pod="openshift-authentication/oauth-openshift-65c4c84884-p8ltw" Nov 28 11:13:05 crc kubenswrapper[4923]: I1128 11:13:05.308459 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/b9d0f7a6-d281-474e-b196-9208ad74cba4-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-65c4c84884-p8ltw\" (UID: \"b9d0f7a6-d281-474e-b196-9208ad74cba4\") " pod="openshift-authentication/oauth-openshift-65c4c84884-p8ltw" Nov 28 11:13:05 crc kubenswrapper[4923]: I1128 11:13:05.308486 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/b9d0f7a6-d281-474e-b196-9208ad74cba4-v4-0-config-user-template-error\") pod \"oauth-openshift-65c4c84884-p8ltw\" (UID: \"b9d0f7a6-d281-474e-b196-9208ad74cba4\") " pod="openshift-authentication/oauth-openshift-65c4c84884-p8ltw" Nov 28 11:13:05 crc kubenswrapper[4923]: I1128 11:13:05.308560 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/b9d0f7a6-d281-474e-b196-9208ad74cba4-audit-policies\") pod \"oauth-openshift-65c4c84884-p8ltw\" (UID: \"b9d0f7a6-d281-474e-b196-9208ad74cba4\") " pod="openshift-authentication/oauth-openshift-65c4c84884-p8ltw" Nov 28 11:13:05 crc kubenswrapper[4923]: I1128 11:13:05.308618 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/b9d0f7a6-d281-474e-b196-9208ad74cba4-v4-0-config-system-service-ca\") pod \"oauth-openshift-65c4c84884-p8ltw\" (UID: \"b9d0f7a6-d281-474e-b196-9208ad74cba4\") " pod="openshift-authentication/oauth-openshift-65c4c84884-p8ltw" Nov 28 11:13:05 crc kubenswrapper[4923]: I1128 11:13:05.308671 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b9d0f7a6-d281-474e-b196-9208ad74cba4-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-65c4c84884-p8ltw\" (UID: \"b9d0f7a6-d281-474e-b196-9208ad74cba4\") " pod="openshift-authentication/oauth-openshift-65c4c84884-p8ltw" Nov 28 11:13:05 crc kubenswrapper[4923]: I1128 11:13:05.308717 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/b9d0f7a6-d281-474e-b196-9208ad74cba4-v4-0-config-system-router-certs\") pod \"oauth-openshift-65c4c84884-p8ltw\" (UID: \"b9d0f7a6-d281-474e-b196-9208ad74cba4\") " pod="openshift-authentication/oauth-openshift-65c4c84884-p8ltw" Nov 28 11:13:05 crc kubenswrapper[4923]: I1128 11:13:05.308758 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/b9d0f7a6-d281-474e-b196-9208ad74cba4-v4-0-config-system-serving-cert\") pod \"oauth-openshift-65c4c84884-p8ltw\" (UID: \"b9d0f7a6-d281-474e-b196-9208ad74cba4\") " pod="openshift-authentication/oauth-openshift-65c4c84884-p8ltw" Nov 28 11:13:05 crc kubenswrapper[4923]: I1128 11:13:05.308793 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s96x2\" (UniqueName: \"kubernetes.io/projected/b9d0f7a6-d281-474e-b196-9208ad74cba4-kube-api-access-s96x2\") pod \"oauth-openshift-65c4c84884-p8ltw\" (UID: \"b9d0f7a6-d281-474e-b196-9208ad74cba4\") " pod="openshift-authentication/oauth-openshift-65c4c84884-p8ltw" Nov 28 11:13:05 crc kubenswrapper[4923]: I1128 11:13:05.308869 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/b9d0f7a6-d281-474e-b196-9208ad74cba4-v4-0-config-user-template-login\") pod \"oauth-openshift-65c4c84884-p8ltw\" (UID: \"b9d0f7a6-d281-474e-b196-9208ad74cba4\") " pod="openshift-authentication/oauth-openshift-65c4c84884-p8ltw" Nov 28 11:13:05 crc kubenswrapper[4923]: I1128 11:13:05.308900 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/b9d0f7a6-d281-474e-b196-9208ad74cba4-v4-0-config-system-session\") pod \"oauth-openshift-65c4c84884-p8ltw\" (UID: \"b9d0f7a6-d281-474e-b196-9208ad74cba4\") " pod="openshift-authentication/oauth-openshift-65c4c84884-p8ltw" Nov 28 11:13:05 crc kubenswrapper[4923]: I1128 11:13:05.308990 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/b9d0f7a6-d281-474e-b196-9208ad74cba4-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-65c4c84884-p8ltw\" (UID: \"b9d0f7a6-d281-474e-b196-9208ad74cba4\") " pod="openshift-authentication/oauth-openshift-65c4c84884-p8ltw" Nov 28 11:13:05 crc kubenswrapper[4923]: I1128 11:13:05.309020 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/b9d0f7a6-d281-474e-b196-9208ad74cba4-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-65c4c84884-p8ltw\" (UID: \"b9d0f7a6-d281-474e-b196-9208ad74cba4\") " pod="openshift-authentication/oauth-openshift-65c4c84884-p8ltw" Nov 28 11:13:05 crc kubenswrapper[4923]: I1128 11:13:05.404963 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Nov 28 11:13:05 crc kubenswrapper[4923]: I1128 11:13:05.410574 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/b9d0f7a6-d281-474e-b196-9208ad74cba4-v4-0-config-user-idp-0-file-data\") 
pod \"oauth-openshift-65c4c84884-p8ltw\" (UID: \"b9d0f7a6-d281-474e-b196-9208ad74cba4\") " pod="openshift-authentication/oauth-openshift-65c4c84884-p8ltw" Nov 28 11:13:05 crc kubenswrapper[4923]: I1128 11:13:05.410667 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/b9d0f7a6-d281-474e-b196-9208ad74cba4-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-65c4c84884-p8ltw\" (UID: \"b9d0f7a6-d281-474e-b196-9208ad74cba4\") " pod="openshift-authentication/oauth-openshift-65c4c84884-p8ltw" Nov 28 11:13:05 crc kubenswrapper[4923]: I1128 11:13:05.410743 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/b9d0f7a6-d281-474e-b196-9208ad74cba4-audit-dir\") pod \"oauth-openshift-65c4c84884-p8ltw\" (UID: \"b9d0f7a6-d281-474e-b196-9208ad74cba4\") " pod="openshift-authentication/oauth-openshift-65c4c84884-p8ltw" Nov 28 11:13:05 crc kubenswrapper[4923]: I1128 11:13:05.410794 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/b9d0f7a6-d281-474e-b196-9208ad74cba4-v4-0-config-system-cliconfig\") pod \"oauth-openshift-65c4c84884-p8ltw\" (UID: \"b9d0f7a6-d281-474e-b196-9208ad74cba4\") " pod="openshift-authentication/oauth-openshift-65c4c84884-p8ltw" Nov 28 11:13:05 crc kubenswrapper[4923]: I1128 11:13:05.410841 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/b9d0f7a6-d281-474e-b196-9208ad74cba4-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-65c4c84884-p8ltw\" (UID: \"b9d0f7a6-d281-474e-b196-9208ad74cba4\") " pod="openshift-authentication/oauth-openshift-65c4c84884-p8ltw" Nov 28 11:13:05 crc kubenswrapper[4923]: I1128 11:13:05.410897 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/b9d0f7a6-d281-474e-b196-9208ad74cba4-v4-0-config-user-template-error\") pod \"oauth-openshift-65c4c84884-p8ltw\" (UID: \"b9d0f7a6-d281-474e-b196-9208ad74cba4\") " pod="openshift-authentication/oauth-openshift-65c4c84884-p8ltw" Nov 28 11:13:05 crc kubenswrapper[4923]: I1128 11:13:05.410925 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/b9d0f7a6-d281-474e-b196-9208ad74cba4-audit-dir\") pod \"oauth-openshift-65c4c84884-p8ltw\" (UID: \"b9d0f7a6-d281-474e-b196-9208ad74cba4\") " pod="openshift-authentication/oauth-openshift-65c4c84884-p8ltw" Nov 28 11:13:05 crc kubenswrapper[4923]: I1128 11:13:05.411124 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/b9d0f7a6-d281-474e-b196-9208ad74cba4-audit-policies\") pod \"oauth-openshift-65c4c84884-p8ltw\" (UID: \"b9d0f7a6-d281-474e-b196-9208ad74cba4\") " pod="openshift-authentication/oauth-openshift-65c4c84884-p8ltw" Nov 28 11:13:05 crc kubenswrapper[4923]: I1128 11:13:05.411175 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/b9d0f7a6-d281-474e-b196-9208ad74cba4-v4-0-config-system-service-ca\") pod \"oauth-openshift-65c4c84884-p8ltw\" (UID: \"b9d0f7a6-d281-474e-b196-9208ad74cba4\") " 
pod="openshift-authentication/oauth-openshift-65c4c84884-p8ltw" Nov 28 11:13:05 crc kubenswrapper[4923]: I1128 11:13:05.411222 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b9d0f7a6-d281-474e-b196-9208ad74cba4-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-65c4c84884-p8ltw\" (UID: \"b9d0f7a6-d281-474e-b196-9208ad74cba4\") " pod="openshift-authentication/oauth-openshift-65c4c84884-p8ltw" Nov 28 11:13:05 crc kubenswrapper[4923]: I1128 11:13:05.411270 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/b9d0f7a6-d281-474e-b196-9208ad74cba4-v4-0-config-system-router-certs\") pod \"oauth-openshift-65c4c84884-p8ltw\" (UID: \"b9d0f7a6-d281-474e-b196-9208ad74cba4\") " pod="openshift-authentication/oauth-openshift-65c4c84884-p8ltw" Nov 28 11:13:05 crc kubenswrapper[4923]: I1128 11:13:05.411328 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/b9d0f7a6-d281-474e-b196-9208ad74cba4-v4-0-config-system-serving-cert\") pod \"oauth-openshift-65c4c84884-p8ltw\" (UID: \"b9d0f7a6-d281-474e-b196-9208ad74cba4\") " pod="openshift-authentication/oauth-openshift-65c4c84884-p8ltw" Nov 28 11:13:05 crc kubenswrapper[4923]: I1128 11:13:05.411380 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s96x2\" (UniqueName: \"kubernetes.io/projected/b9d0f7a6-d281-474e-b196-9208ad74cba4-kube-api-access-s96x2\") pod \"oauth-openshift-65c4c84884-p8ltw\" (UID: \"b9d0f7a6-d281-474e-b196-9208ad74cba4\") " pod="openshift-authentication/oauth-openshift-65c4c84884-p8ltw" Nov 28 11:13:05 crc kubenswrapper[4923]: I1128 11:13:05.411441 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/b9d0f7a6-d281-474e-b196-9208ad74cba4-v4-0-config-user-template-login\") pod \"oauth-openshift-65c4c84884-p8ltw\" (UID: \"b9d0f7a6-d281-474e-b196-9208ad74cba4\") " pod="openshift-authentication/oauth-openshift-65c4c84884-p8ltw" Nov 28 11:13:05 crc kubenswrapper[4923]: I1128 11:13:05.411508 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/b9d0f7a6-d281-474e-b196-9208ad74cba4-v4-0-config-system-session\") pod \"oauth-openshift-65c4c84884-p8ltw\" (UID: \"b9d0f7a6-d281-474e-b196-9208ad74cba4\") " pod="openshift-authentication/oauth-openshift-65c4c84884-p8ltw" Nov 28 11:13:05 crc kubenswrapper[4923]: I1128 11:13:05.412468 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/b9d0f7a6-d281-474e-b196-9208ad74cba4-v4-0-config-system-cliconfig\") pod \"oauth-openshift-65c4c84884-p8ltw\" (UID: \"b9d0f7a6-d281-474e-b196-9208ad74cba4\") " pod="openshift-authentication/oauth-openshift-65c4c84884-p8ltw" Nov 28 11:13:05 crc kubenswrapper[4923]: I1128 11:13:05.412812 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b9d0f7a6-d281-474e-b196-9208ad74cba4-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-65c4c84884-p8ltw\" (UID: \"b9d0f7a6-d281-474e-b196-9208ad74cba4\") " 
pod="openshift-authentication/oauth-openshift-65c4c84884-p8ltw" Nov 28 11:13:05 crc kubenswrapper[4923]: I1128 11:13:05.413057 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/b9d0f7a6-d281-474e-b196-9208ad74cba4-audit-policies\") pod \"oauth-openshift-65c4c84884-p8ltw\" (UID: \"b9d0f7a6-d281-474e-b196-9208ad74cba4\") " pod="openshift-authentication/oauth-openshift-65c4c84884-p8ltw" Nov 28 11:13:05 crc kubenswrapper[4923]: I1128 11:13:05.413088 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/b9d0f7a6-d281-474e-b196-9208ad74cba4-v4-0-config-system-service-ca\") pod \"oauth-openshift-65c4c84884-p8ltw\" (UID: \"b9d0f7a6-d281-474e-b196-9208ad74cba4\") " pod="openshift-authentication/oauth-openshift-65c4c84884-p8ltw" Nov 28 11:13:05 crc kubenswrapper[4923]: I1128 11:13:05.419675 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/b9d0f7a6-d281-474e-b196-9208ad74cba4-v4-0-config-system-serving-cert\") pod \"oauth-openshift-65c4c84884-p8ltw\" (UID: \"b9d0f7a6-d281-474e-b196-9208ad74cba4\") " pod="openshift-authentication/oauth-openshift-65c4c84884-p8ltw" Nov 28 11:13:05 crc kubenswrapper[4923]: I1128 11:13:05.420168 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/b9d0f7a6-d281-474e-b196-9208ad74cba4-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-65c4c84884-p8ltw\" (UID: \"b9d0f7a6-d281-474e-b196-9208ad74cba4\") " pod="openshift-authentication/oauth-openshift-65c4c84884-p8ltw" Nov 28 11:13:05 crc kubenswrapper[4923]: I1128 11:13:05.421544 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/b9d0f7a6-d281-474e-b196-9208ad74cba4-v4-0-config-system-session\") pod \"oauth-openshift-65c4c84884-p8ltw\" (UID: \"b9d0f7a6-d281-474e-b196-9208ad74cba4\") " pod="openshift-authentication/oauth-openshift-65c4c84884-p8ltw" Nov 28 11:13:05 crc kubenswrapper[4923]: I1128 11:13:05.422769 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/b9d0f7a6-d281-474e-b196-9208ad74cba4-v4-0-config-system-router-certs\") pod \"oauth-openshift-65c4c84884-p8ltw\" (UID: \"b9d0f7a6-d281-474e-b196-9208ad74cba4\") " pod="openshift-authentication/oauth-openshift-65c4c84884-p8ltw" Nov 28 11:13:05 crc kubenswrapper[4923]: I1128 11:13:05.422782 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/b9d0f7a6-d281-474e-b196-9208ad74cba4-v4-0-config-user-template-login\") pod \"oauth-openshift-65c4c84884-p8ltw\" (UID: \"b9d0f7a6-d281-474e-b196-9208ad74cba4\") " pod="openshift-authentication/oauth-openshift-65c4c84884-p8ltw" Nov 28 11:13:05 crc kubenswrapper[4923]: I1128 11:13:05.433361 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/b9d0f7a6-d281-474e-b196-9208ad74cba4-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-65c4c84884-p8ltw\" (UID: \"b9d0f7a6-d281-474e-b196-9208ad74cba4\") " pod="openshift-authentication/oauth-openshift-65c4c84884-p8ltw" Nov 28 11:13:05 crc 
kubenswrapper[4923]: I1128 11:13:05.433477 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/b9d0f7a6-d281-474e-b196-9208ad74cba4-v4-0-config-user-template-error\") pod \"oauth-openshift-65c4c84884-p8ltw\" (UID: \"b9d0f7a6-d281-474e-b196-9208ad74cba4\") " pod="openshift-authentication/oauth-openshift-65c4c84884-p8ltw" Nov 28 11:13:05 crc kubenswrapper[4923]: I1128 11:13:05.434246 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/b9d0f7a6-d281-474e-b196-9208ad74cba4-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-65c4c84884-p8ltw\" (UID: \"b9d0f7a6-d281-474e-b196-9208ad74cba4\") " pod="openshift-authentication/oauth-openshift-65c4c84884-p8ltw" Nov 28 11:13:05 crc kubenswrapper[4923]: I1128 11:13:05.450902 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s96x2\" (UniqueName: \"kubernetes.io/projected/b9d0f7a6-d281-474e-b196-9208ad74cba4-kube-api-access-s96x2\") pod \"oauth-openshift-65c4c84884-p8ltw\" (UID: \"b9d0f7a6-d281-474e-b196-9208ad74cba4\") " pod="openshift-authentication/oauth-openshift-65c4c84884-p8ltw" Nov 28 11:13:05 crc kubenswrapper[4923]: I1128 11:13:05.471575 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Nov 28 11:13:05 crc kubenswrapper[4923]: I1128 11:13:05.493877 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Nov 28 11:13:05 crc kubenswrapper[4923]: I1128 11:13:05.530826 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-65c4c84884-p8ltw" Nov 28 11:13:05 crc kubenswrapper[4923]: I1128 11:13:05.655050 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Nov 28 11:13:05 crc kubenswrapper[4923]: I1128 11:13:05.954157 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Nov 28 11:13:05 crc kubenswrapper[4923]: I1128 11:13:05.966247 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Nov 28 11:13:06 crc kubenswrapper[4923]: I1128 11:13:06.017874 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Nov 28 11:13:06 crc kubenswrapper[4923]: I1128 11:13:06.018917 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 28 11:13:06 crc kubenswrapper[4923]: I1128 11:13:06.038813 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Nov 28 11:13:06 crc kubenswrapper[4923]: I1128 11:13:06.040555 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Nov 28 11:13:06 crc kubenswrapper[4923]: I1128 11:13:06.083258 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Nov 28 11:13:06 crc kubenswrapper[4923]: I1128 11:13:06.230713 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Nov 28 11:13:06 crc kubenswrapper[4923]: I1128 11:13:06.352394 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Nov 28 11:13:06 crc kubenswrapper[4923]: I1128 11:13:06.366274 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Nov 28 11:13:06 crc kubenswrapper[4923]: I1128 11:13:06.399441 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Nov 28 11:13:06 crc kubenswrapper[4923]: I1128 11:13:06.478086 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Nov 28 11:13:06 crc kubenswrapper[4923]: I1128 11:13:06.504299 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Nov 28 11:13:06 crc kubenswrapper[4923]: I1128 11:13:06.513757 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Nov 28 11:13:06 crc kubenswrapper[4923]: I1128 11:13:06.536921 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Nov 28 11:13:06 crc kubenswrapper[4923]: I1128 11:13:06.609675 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Nov 28 11:13:06 crc kubenswrapper[4923]: I1128 11:13:06.625289 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Nov 28 11:13:06 crc kubenswrapper[4923]: I1128 
11:13:06.769882 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Nov 28 11:13:06 crc kubenswrapper[4923]: I1128 11:13:06.831705 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Nov 28 11:13:06 crc kubenswrapper[4923]: I1128 11:13:06.869215 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Nov 28 11:13:07 crc kubenswrapper[4923]: I1128 11:13:07.019737 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Nov 28 11:13:07 crc kubenswrapper[4923]: I1128 11:13:07.068088 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Nov 28 11:13:07 crc kubenswrapper[4923]: I1128 11:13:07.221356 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Nov 28 11:13:07 crc kubenswrapper[4923]: I1128 11:13:07.225281 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Nov 28 11:13:07 crc kubenswrapper[4923]: I1128 11:13:07.242896 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Nov 28 11:13:07 crc kubenswrapper[4923]: I1128 11:13:07.321959 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 28 11:13:07 crc kubenswrapper[4923]: I1128 11:13:07.364015 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Nov 28 11:13:07 crc kubenswrapper[4923]: I1128 11:13:07.373907 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Nov 28 11:13:07 crc kubenswrapper[4923]: I1128 11:13:07.394731 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Nov 28 11:13:07 crc kubenswrapper[4923]: I1128 11:13:07.432605 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Nov 28 11:13:07 crc kubenswrapper[4923]: I1128 11:13:07.433390 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Nov 28 11:13:07 crc kubenswrapper[4923]: I1128 11:13:07.456239 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Nov 28 11:13:07 crc kubenswrapper[4923]: I1128 11:13:07.511964 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Nov 28 11:13:07 crc kubenswrapper[4923]: I1128 11:13:07.525101 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Nov 28 11:13:07 crc kubenswrapper[4923]: I1128 11:13:07.542310 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Nov 28 11:13:07 crc kubenswrapper[4923]: I1128 11:13:07.569276 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Nov 28 11:13:07 crc kubenswrapper[4923]: I1128 11:13:07.625697 4923 reflector.go:368] 
Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Nov 28 11:13:07 crc kubenswrapper[4923]: I1128 11:13:07.662855 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Nov 28 11:13:07 crc kubenswrapper[4923]: I1128 11:13:07.707670 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Nov 28 11:13:07 crc kubenswrapper[4923]: I1128 11:13:07.808446 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Nov 28 11:13:07 crc kubenswrapper[4923]: I1128 11:13:07.817879 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Nov 28 11:13:07 crc kubenswrapper[4923]: I1128 11:13:07.859341 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Nov 28 11:13:07 crc kubenswrapper[4923]: I1128 11:13:07.961414 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 28 11:13:07 crc kubenswrapper[4923]: I1128 11:13:07.974512 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Nov 28 11:13:08 crc kubenswrapper[4923]: I1128 11:13:08.283677 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Nov 28 11:13:08 crc kubenswrapper[4923]: I1128 11:13:08.304437 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Nov 28 11:13:08 crc kubenswrapper[4923]: I1128 11:13:08.346906 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 28 11:13:08 crc kubenswrapper[4923]: I1128 11:13:08.355157 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Nov 28 11:13:08 crc kubenswrapper[4923]: I1128 11:13:08.375458 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 28 11:13:08 crc kubenswrapper[4923]: I1128 11:13:08.484838 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Nov 28 11:13:08 crc kubenswrapper[4923]: I1128 11:13:08.504712 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Nov 28 11:13:08 crc kubenswrapper[4923]: I1128 11:13:08.665691 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Nov 28 11:13:08 crc kubenswrapper[4923]: I1128 11:13:08.709430 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Nov 28 11:13:08 crc kubenswrapper[4923]: I1128 11:13:08.750836 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Nov 28 11:13:08 crc kubenswrapper[4923]: I1128 11:13:08.759592 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Nov 28 11:13:08 crc kubenswrapper[4923]: I1128 11:13:08.910959 4923 reflector.go:368] Caches populated for *v1.ConfigMap 
from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Nov 28 11:13:08 crc kubenswrapper[4923]: I1128 11:13:08.975701 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Nov 28 11:13:09 crc kubenswrapper[4923]: I1128 11:13:09.017123 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Nov 28 11:13:09 crc kubenswrapper[4923]: I1128 11:13:09.032401 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 28 11:13:09 crc kubenswrapper[4923]: I1128 11:13:09.052924 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Nov 28 11:13:09 crc kubenswrapper[4923]: I1128 11:13:09.093649 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Nov 28 11:13:09 crc kubenswrapper[4923]: I1128 11:13:09.139992 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Nov 28 11:13:09 crc kubenswrapper[4923]: I1128 11:13:09.165738 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Nov 28 11:13:09 crc kubenswrapper[4923]: I1128 11:13:09.206472 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Nov 28 11:13:09 crc kubenswrapper[4923]: I1128 11:13:09.315765 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Nov 28 11:13:09 crc kubenswrapper[4923]: I1128 11:13:09.360500 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Nov 28 11:13:09 crc kubenswrapper[4923]: I1128 11:13:09.381082 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Nov 28 11:13:09 crc kubenswrapper[4923]: I1128 11:13:09.392833 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Nov 28 11:13:09 crc kubenswrapper[4923]: I1128 11:13:09.396176 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Nov 28 11:13:09 crc kubenswrapper[4923]: I1128 11:13:09.416245 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Nov 28 11:13:09 crc kubenswrapper[4923]: I1128 11:13:09.515401 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Nov 28 11:13:09 crc kubenswrapper[4923]: I1128 11:13:09.531326 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Nov 28 11:13:09 crc kubenswrapper[4923]: I1128 11:13:09.541774 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 28 11:13:09 crc kubenswrapper[4923]: I1128 11:13:09.676412 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Nov 28 11:13:09 crc kubenswrapper[4923]: I1128 11:13:09.702408 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Nov 
28 11:13:09 crc kubenswrapper[4923]: I1128 11:13:09.734779 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Nov 28 11:13:09 crc kubenswrapper[4923]: I1128 11:13:09.743915 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Nov 28 11:13:09 crc kubenswrapper[4923]: I1128 11:13:09.880053 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Nov 28 11:13:09 crc kubenswrapper[4923]: I1128 11:13:09.929254 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Nov 28 11:13:09 crc kubenswrapper[4923]: I1128 11:13:09.949651 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Nov 28 11:13:10 crc kubenswrapper[4923]: I1128 11:13:10.040608 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Nov 28 11:13:10 crc kubenswrapper[4923]: I1128 11:13:10.076922 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Nov 28 11:13:10 crc kubenswrapper[4923]: I1128 11:13:10.115546 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Nov 28 11:13:10 crc kubenswrapper[4923]: I1128 11:13:10.264523 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Nov 28 11:13:10 crc kubenswrapper[4923]: I1128 11:13:10.293625 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Nov 28 11:13:10 crc kubenswrapper[4923]: I1128 11:13:10.296139 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Nov 28 11:13:10 crc kubenswrapper[4923]: I1128 11:13:10.451296 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Nov 28 11:13:10 crc kubenswrapper[4923]: I1128 11:13:10.496835 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Nov 28 11:13:10 crc kubenswrapper[4923]: I1128 11:13:10.601577 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Nov 28 11:13:10 crc kubenswrapper[4923]: I1128 11:13:10.645437 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Nov 28 11:13:10 crc kubenswrapper[4923]: I1128 11:13:10.774832 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Nov 28 11:13:10 crc kubenswrapper[4923]: I1128 11:13:10.800853 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Nov 28 11:13:10 crc kubenswrapper[4923]: I1128 11:13:10.818872 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Nov 28 11:13:10 crc kubenswrapper[4923]: I1128 11:13:10.851360 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openshift-authentication/oauth-openshift-65c4c84884-p8ltw"] Nov 28 11:13:10 crc kubenswrapper[4923]: I1128 11:13:10.917786 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Nov 28 11:13:11 crc kubenswrapper[4923]: I1128 11:13:11.035784 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Nov 28 11:13:11 crc kubenswrapper[4923]: I1128 11:13:11.058021 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Nov 28 11:13:11 crc kubenswrapper[4923]: I1128 11:13:11.078348 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Nov 28 11:13:11 crc kubenswrapper[4923]: I1128 11:13:11.088276 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Nov 28 11:13:11 crc kubenswrapper[4923]: I1128 11:13:11.104695 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Nov 28 11:13:11 crc kubenswrapper[4923]: I1128 11:13:11.172997 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Nov 28 11:13:11 crc kubenswrapper[4923]: I1128 11:13:11.315803 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Nov 28 11:13:11 crc kubenswrapper[4923]: I1128 11:13:11.329483 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-65c4c84884-p8ltw"] Nov 28 11:13:11 crc kubenswrapper[4923]: I1128 11:13:11.391404 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Nov 28 11:13:11 crc kubenswrapper[4923]: I1128 11:13:11.413688 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Nov 28 11:13:11 crc kubenswrapper[4923]: I1128 11:13:11.417774 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Nov 28 11:13:11 crc kubenswrapper[4923]: I1128 11:13:11.491603 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 28 11:13:11 crc kubenswrapper[4923]: I1128 11:13:11.492338 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-65c4c84884-p8ltw" event={"ID":"b9d0f7a6-d281-474e-b196-9208ad74cba4","Type":"ContainerStarted","Data":"70bfe1ff2fd1633c2b42f589382d94fca7e02273d3e9ddb5645f59e1afb4c26e"} Nov 28 11:13:11 crc kubenswrapper[4923]: I1128 11:13:11.527695 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Nov 28 11:13:11 crc kubenswrapper[4923]: I1128 11:13:11.633870 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 28 11:13:11 crc kubenswrapper[4923]: I1128 11:13:11.779465 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Nov 28 11:13:11 crc kubenswrapper[4923]: I1128 11:13:11.828522 4923 reflector.go:368] Caches populated 
for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Nov 28 11:13:11 crc kubenswrapper[4923]: I1128 11:13:11.869968 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Nov 28 11:13:11 crc kubenswrapper[4923]: I1128 11:13:11.907516 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Nov 28 11:13:11 crc kubenswrapper[4923]: I1128 11:13:11.992337 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Nov 28 11:13:12 crc kubenswrapper[4923]: I1128 11:13:12.007231 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Nov 28 11:13:12 crc kubenswrapper[4923]: I1128 11:13:12.059792 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 28 11:13:12 crc kubenswrapper[4923]: I1128 11:13:12.224456 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Nov 28 11:13:12 crc kubenswrapper[4923]: I1128 11:13:12.310686 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Nov 28 11:13:12 crc kubenswrapper[4923]: I1128 11:13:12.310686 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Nov 28 11:13:12 crc kubenswrapper[4923]: I1128 11:13:12.315589 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Nov 28 11:13:12 crc kubenswrapper[4923]: I1128 11:13:12.419919 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 28 11:13:12 crc kubenswrapper[4923]: I1128 11:13:12.474825 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Nov 28 11:13:12 crc kubenswrapper[4923]: I1128 11:13:12.503424 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-authentication_oauth-openshift-65c4c84884-p8ltw_b9d0f7a6-d281-474e-b196-9208ad74cba4/oauth-openshift/0.log" Nov 28 11:13:12 crc kubenswrapper[4923]: I1128 11:13:12.503555 4923 generic.go:334] "Generic (PLEG): container finished" podID="b9d0f7a6-d281-474e-b196-9208ad74cba4" containerID="0f0ffb81268195f2e504b3ad9be9a3eb6382b09fd2235a6fa2f7ed40927cd552" exitCode=255 Nov 28 11:13:12 crc kubenswrapper[4923]: I1128 11:13:12.503617 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-65c4c84884-p8ltw" event={"ID":"b9d0f7a6-d281-474e-b196-9208ad74cba4","Type":"ContainerDied","Data":"0f0ffb81268195f2e504b3ad9be9a3eb6382b09fd2235a6fa2f7ed40927cd552"} Nov 28 11:13:12 crc kubenswrapper[4923]: I1128 11:13:12.504405 4923 scope.go:117] "RemoveContainer" containerID="0f0ffb81268195f2e504b3ad9be9a3eb6382b09fd2235a6fa2f7ed40927cd552" Nov 28 11:13:12 crc kubenswrapper[4923]: I1128 11:13:12.511550 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Nov 28 11:13:12 crc kubenswrapper[4923]: I1128 11:13:12.634195 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Nov 
28 11:13:12 crc kubenswrapper[4923]: I1128 11:13:12.854240 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 28 11:13:13 crc kubenswrapper[4923]: I1128 11:13:13.010713 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Nov 28 11:13:13 crc kubenswrapper[4923]: I1128 11:13:13.034754 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Nov 28 11:13:13 crc kubenswrapper[4923]: I1128 11:13:13.058106 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Nov 28 11:13:13 crc kubenswrapper[4923]: I1128 11:13:13.066509 4923 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Nov 28 11:13:13 crc kubenswrapper[4923]: I1128 11:13:13.099496 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Nov 28 11:13:13 crc kubenswrapper[4923]: I1128 11:13:13.146271 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Nov 28 11:13:13 crc kubenswrapper[4923]: I1128 11:13:13.355284 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Nov 28 11:13:13 crc kubenswrapper[4923]: I1128 11:13:13.391779 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Nov 28 11:13:13 crc kubenswrapper[4923]: I1128 11:13:13.440861 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Nov 28 11:13:13 crc kubenswrapper[4923]: I1128 11:13:13.457516 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Nov 28 11:13:13 crc kubenswrapper[4923]: I1128 11:13:13.509244 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-authentication_oauth-openshift-65c4c84884-p8ltw_b9d0f7a6-d281-474e-b196-9208ad74cba4/oauth-openshift/0.log" Nov 28 11:13:13 crc kubenswrapper[4923]: I1128 11:13:13.509523 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-65c4c84884-p8ltw" event={"ID":"b9d0f7a6-d281-474e-b196-9208ad74cba4","Type":"ContainerStarted","Data":"09a351ae2814be325adc1446727ffdbabe299604d844057b84f68a74f609d63d"} Nov 28 11:13:13 crc kubenswrapper[4923]: I1128 11:13:13.509768 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-65c4c84884-p8ltw" Nov 28 11:13:13 crc kubenswrapper[4923]: I1128 11:13:13.516738 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-65c4c84884-p8ltw" Nov 28 11:13:13 crc kubenswrapper[4923]: I1128 11:13:13.547834 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-65c4c84884-p8ltw" podStartSLOduration=70.547818384 podStartE2EDuration="1m10.547818384s" podCreationTimestamp="2025-11-28 11:12:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:13:13.529666665 +0000 UTC m=+272.658350875" watchObservedRunningTime="2025-11-28 11:13:13.547818384 +0000 UTC 
m=+272.676502594" Nov 28 11:13:13 crc kubenswrapper[4923]: I1128 11:13:13.553773 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Nov 28 11:13:13 crc kubenswrapper[4923]: I1128 11:13:13.569955 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Nov 28 11:13:13 crc kubenswrapper[4923]: I1128 11:13:13.598996 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Nov 28 11:13:13 crc kubenswrapper[4923]: I1128 11:13:13.712609 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Nov 28 11:13:13 crc kubenswrapper[4923]: I1128 11:13:13.787122 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Nov 28 11:13:13 crc kubenswrapper[4923]: I1128 11:13:13.902526 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Nov 28 11:13:13 crc kubenswrapper[4923]: I1128 11:13:13.912452 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Nov 28 11:13:13 crc kubenswrapper[4923]: I1128 11:13:13.970049 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Nov 28 11:13:14 crc kubenswrapper[4923]: I1128 11:13:14.013547 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Nov 28 11:13:14 crc kubenswrapper[4923]: I1128 11:13:14.157459 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Nov 28 11:13:14 crc kubenswrapper[4923]: I1128 11:13:14.187227 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Nov 28 11:13:14 crc kubenswrapper[4923]: I1128 11:13:14.202626 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Nov 28 11:13:14 crc kubenswrapper[4923]: I1128 11:13:14.203381 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Nov 28 11:13:14 crc kubenswrapper[4923]: I1128 11:13:14.234252 4923 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Nov 28 11:13:14 crc kubenswrapper[4923]: I1128 11:13:14.342784 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Nov 28 11:13:14 crc kubenswrapper[4923]: I1128 11:13:14.344337 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Nov 28 11:13:14 crc kubenswrapper[4923]: I1128 11:13:14.367533 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Nov 28 11:13:14 crc kubenswrapper[4923]: I1128 11:13:14.602991 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Nov 28 11:13:14 crc kubenswrapper[4923]: I1128 11:13:14.713976 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Nov 28 11:13:14 crc kubenswrapper[4923]: I1128 
11:13:14.747963 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Nov 28 11:13:14 crc kubenswrapper[4923]: I1128 11:13:14.766296 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Nov 28 11:13:14 crc kubenswrapper[4923]: I1128 11:13:14.837668 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Nov 28 11:13:14 crc kubenswrapper[4923]: I1128 11:13:14.865018 4923 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Nov 28 11:13:14 crc kubenswrapper[4923]: I1128 11:13:14.865325 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" containerID="cri-o://38064a9c80b0288cd193292dd4b356e9aa5202fb02dc73e3b199387e46163d03" gracePeriod=5 Nov 28 11:13:14 crc kubenswrapper[4923]: I1128 11:13:14.875299 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Nov 28 11:13:14 crc kubenswrapper[4923]: I1128 11:13:14.887051 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Nov 28 11:13:14 crc kubenswrapper[4923]: I1128 11:13:14.978594 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Nov 28 11:13:15 crc kubenswrapper[4923]: I1128 11:13:15.011079 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Nov 28 11:13:15 crc kubenswrapper[4923]: I1128 11:13:15.098354 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Nov 28 11:13:15 crc kubenswrapper[4923]: I1128 11:13:15.465713 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Nov 28 11:13:15 crc kubenswrapper[4923]: I1128 11:13:15.470481 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Nov 28 11:13:15 crc kubenswrapper[4923]: I1128 11:13:15.470545 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Nov 28 11:13:15 crc kubenswrapper[4923]: I1128 11:13:15.470612 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Nov 28 11:13:15 crc kubenswrapper[4923]: I1128 11:13:15.471812 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 28 11:13:15 crc kubenswrapper[4923]: I1128 11:13:15.476392 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Nov 28 11:13:15 crc kubenswrapper[4923]: I1128 11:13:15.779839 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Nov 28 11:13:15 crc kubenswrapper[4923]: I1128 11:13:15.798748 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Nov 28 11:13:15 crc kubenswrapper[4923]: I1128 
11:13:15.823155 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6"
Nov 28 11:13:15 crc kubenswrapper[4923]: I1128 11:13:15.873131 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt"
Nov 28 11:13:15 crc kubenswrapper[4923]: I1128 11:13:15.975581 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle"
Nov 28 11:13:16 crc kubenswrapper[4923]: I1128 11:13:16.134899 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt"
Nov 28 11:13:16 crc kubenswrapper[4923]: I1128 11:13:16.345151 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert"
Nov 28 11:13:16 crc kubenswrapper[4923]: I1128 11:13:16.444158 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx"
Nov 28 11:13:16 crc kubenswrapper[4923]: I1128 11:13:16.523143 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config"
Nov 28 11:13:16 crc kubenswrapper[4923]: I1128 11:13:16.525922 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt"
Nov 28 11:13:16 crc kubenswrapper[4923]: I1128 11:13:16.541447 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca"
Nov 28 11:13:16 crc kubenswrapper[4923]: I1128 11:13:16.606280 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert"
Nov 28 11:13:16 crc kubenswrapper[4923]: I1128 11:13:16.647532 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff"
Nov 28 11:13:16 crc kubenswrapper[4923]: I1128 11:13:16.729403 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt"
Nov 28 11:13:16 crc kubenswrapper[4923]: I1128 11:13:16.738376 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg"
Nov 28 11:13:16 crc kubenswrapper[4923]: I1128 11:13:16.843304 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default"
Nov 28 11:13:16 crc kubenswrapper[4923]: I1128 11:13:16.858695 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert"
Nov 28 11:13:16 crc kubenswrapper[4923]: I1128 11:13:16.950603 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt"
Nov 28 11:13:16 crc kubenswrapper[4923]: I1128 11:13:16.991197 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw"
Nov 28 11:13:17 crc kubenswrapper[4923]: I1128 11:13:17.034612 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls"
Nov 28 11:13:17 crc kubenswrapper[4923]: I1128 11:13:17.368956 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt"
Nov 28 11:13:17 crc kubenswrapper[4923]: I1128 11:13:17.501975 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq"
Nov 28 11:13:17 crc kubenswrapper[4923]: I1128 11:13:17.525536 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config"
Nov 28 11:13:17 crc kubenswrapper[4923]: I1128 11:13:17.550532 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt"
Nov 28 11:13:17 crc kubenswrapper[4923]: I1128 11:13:17.554017 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca"
Nov 28 11:13:17 crc kubenswrapper[4923]: I1128 11:13:17.758611 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt"
Nov 28 11:13:17 crc kubenswrapper[4923]: I1128 11:13:17.789343 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token"
Nov 28 11:13:17 crc kubenswrapper[4923]: I1128 11:13:17.907010 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt"
Nov 28 11:13:17 crc kubenswrapper[4923]: I1128 11:13:17.918693 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert"
Nov 28 11:13:17 crc kubenswrapper[4923]: I1128 11:13:17.945878 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert"
Nov 28 11:13:18 crc kubenswrapper[4923]: I1128 11:13:18.037605 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87"
Nov 28 11:13:18 crc kubenswrapper[4923]: I1128 11:13:18.271512 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert"
Nov 28 11:13:18 crc kubenswrapper[4923]: I1128 11:13:18.366024 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls"
Nov 28 11:13:18 crc kubenswrapper[4923]: I1128 11:13:18.379519 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client"
Nov 28 11:13:18 crc kubenswrapper[4923]: I1128 11:13:18.452841 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt"
Nov 28 11:13:18 crc kubenswrapper[4923]: I1128 11:13:18.475614 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr"
Nov 28 11:13:18 crc kubenswrapper[4923]: I1128 11:13:18.568248 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr"
Nov 28 11:13:18 crc kubenswrapper[4923]: I1128 11:13:18.642498 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle"
Nov 28 11:13:18 crc kubenswrapper[4923]: I1128 11:13:18.705295 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides"
Nov 28 11:13:18 crc kubenswrapper[4923]: I1128 11:13:18.793616 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle"
Nov 28 11:13:19 crc kubenswrapper[4923]: I1128 11:13:19.134392 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt"
Nov 28 11:13:19 crc kubenswrapper[4923]: I1128 11:13:19.382316 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert"
Nov 28 11:13:19 crc kubenswrapper[4923]: I1128 11:13:19.473085 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert"
Nov 28 11:13:19 crc kubenswrapper[4923]: I1128 11:13:19.642824 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt"
Nov 28 11:13:19 crc kubenswrapper[4923]: I1128 11:13:19.744048 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c"
Nov 28 11:13:19 crc kubenswrapper[4923]: I1128 11:13:19.764685 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt"
Nov 28 11:13:20 crc kubenswrapper[4923]: I1128 11:13:20.192478 4923 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160
Nov 28 11:13:20 crc kubenswrapper[4923]: I1128 11:13:20.446011 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log"
Nov 28 11:13:20 crc kubenswrapper[4923]: I1128 11:13:20.446308 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 28 11:13:20 crc kubenswrapper[4923]: I1128 11:13:20.553773 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log"
Nov 28 11:13:20 crc kubenswrapper[4923]: I1128 11:13:20.553853 4923 generic.go:334] "Generic (PLEG): container finished" podID="f85e55b1a89d02b0cb034b1ea31ed45a" containerID="38064a9c80b0288cd193292dd4b356e9aa5202fb02dc73e3b199387e46163d03" exitCode=137
Nov 28 11:13:20 crc kubenswrapper[4923]: I1128 11:13:20.553906 4923 scope.go:117] "RemoveContainer" containerID="38064a9c80b0288cd193292dd4b356e9aa5202fb02dc73e3b199387e46163d03"
Nov 28 11:13:20 crc kubenswrapper[4923]: I1128 11:13:20.553972 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 28 11:13:20 crc kubenswrapper[4923]: I1128 11:13:20.570469 4923 scope.go:117] "RemoveContainer" containerID="38064a9c80b0288cd193292dd4b356e9aa5202fb02dc73e3b199387e46163d03"
Nov 28 11:13:20 crc kubenswrapper[4923]: E1128 11:13:20.570859 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"38064a9c80b0288cd193292dd4b356e9aa5202fb02dc73e3b199387e46163d03\": container with ID starting with 38064a9c80b0288cd193292dd4b356e9aa5202fb02dc73e3b199387e46163d03 not found: ID does not exist" containerID="38064a9c80b0288cd193292dd4b356e9aa5202fb02dc73e3b199387e46163d03"
Nov 28 11:13:20 crc kubenswrapper[4923]: I1128 11:13:20.570887 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"38064a9c80b0288cd193292dd4b356e9aa5202fb02dc73e3b199387e46163d03"} err="failed to get container status \"38064a9c80b0288cd193292dd4b356e9aa5202fb02dc73e3b199387e46163d03\": rpc error: code = NotFound desc = could not find container \"38064a9c80b0288cd193292dd4b356e9aa5202fb02dc73e3b199387e46163d03\": container with ID starting with 38064a9c80b0288cd193292dd4b356e9aa5202fb02dc73e3b199387e46163d03 not found: ID does not exist"
Nov 28 11:13:20 crc kubenswrapper[4923]: I1128 11:13:20.606962 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd"
Nov 28 11:13:20 crc kubenswrapper[4923]: I1128 11:13:20.617576 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") "
Nov 28 11:13:20 crc kubenswrapper[4923]: I1128 11:13:20.617642 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests" (OuterVolumeSpecName: "manifests") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "manifests". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 11:13:20 crc kubenswrapper[4923]: I1128 11:13:20.617698 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") "
Nov 28 11:13:20 crc kubenswrapper[4923]: I1128 11:13:20.617779 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock" (OuterVolumeSpecName: "var-lock") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 11:13:20 crc kubenswrapper[4923]: I1128 11:13:20.617789 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") "
Nov 28 11:13:20 crc kubenswrapper[4923]: I1128 11:13:20.617922 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 11:13:20 crc kubenswrapper[4923]: I1128 11:13:20.617980 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") "
Nov 28 11:13:20 crc kubenswrapper[4923]: I1128 11:13:20.618032 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log" (OuterVolumeSpecName: "var-log") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-log". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 11:13:20 crc kubenswrapper[4923]: I1128 11:13:20.618079 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") "
Nov 28 11:13:20 crc kubenswrapper[4923]: I1128 11:13:20.618514 4923 reconciler_common.go:293] "Volume detached for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") on node \"crc\" DevicePath \"\""
Nov 28 11:13:20 crc kubenswrapper[4923]: I1128 11:13:20.618537 4923 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") on node \"crc\" DevicePath \"\""
Nov 28 11:13:20 crc kubenswrapper[4923]: I1128 11:13:20.618555 4923 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") on node \"crc\" DevicePath \"\""
Nov 28 11:13:20 crc kubenswrapper[4923]: I1128 11:13:20.618602 4923 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") on node \"crc\" DevicePath \"\""
Nov 28 11:13:20 crc kubenswrapper[4923]: I1128 11:13:20.631083 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir" (OuterVolumeSpecName: "pod-resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "pod-resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 11:13:20 crc kubenswrapper[4923]: I1128 11:13:20.720444 4923 reconciler_common.go:293] "Volume detached for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") on node \"crc\" DevicePath \"\""
Nov 28 11:13:21 crc kubenswrapper[4923]: I1128 11:13:21.179321 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" path="/var/lib/kubelet/pods/f85e55b1a89d02b0cb034b1ea31ed45a/volumes"
Nov 28 11:13:21 crc kubenswrapper[4923]: I1128 11:13:21.180055 4923 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID=""
Nov 28 11:13:21 crc kubenswrapper[4923]: I1128 11:13:21.192087 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"]
Nov 28 11:13:21 crc kubenswrapper[4923]: I1128 11:13:21.192131 4923 kubelet.go:2649] "Unable to find pod for mirror pod, skipping" mirrorPod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" mirrorPodUID="4c7c02e9-3323-4b2a-abc7-c530f9c1c7e3"
Nov 28 11:13:21 crc kubenswrapper[4923]: I1128 11:13:21.194787 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"]
Nov 28 11:13:21 crc kubenswrapper[4923]: I1128 11:13:21.194823 4923 kubelet.go:2673] "Unable to find pod for mirror pod, skipping" mirrorPod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" mirrorPodUID="4c7c02e9-3323-4b2a-abc7-c530f9c1c7e3"
Nov 28 11:13:33 crc kubenswrapper[4923]: I1128 11:13:33.635472 4923 generic.go:334] "Generic (PLEG): container finished" podID="bc404fb9-c265-4265-84e8-e3dd111fae9a" containerID="00403708fb60cdd5ce416fdf4b557eed99855044ed47656236d9eec393278396" exitCode=0
Nov 28 11:13:33 crc kubenswrapper[4923]: I1128 11:13:33.636063 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-lswhk" event={"ID":"bc404fb9-c265-4265-84e8-e3dd111fae9a","Type":"ContainerDied","Data":"00403708fb60cdd5ce416fdf4b557eed99855044ed47656236d9eec393278396"}
Nov 28 11:13:33 crc kubenswrapper[4923]: I1128 11:13:33.636751 4923 scope.go:117] "RemoveContainer" containerID="00403708fb60cdd5ce416fdf4b557eed99855044ed47656236d9eec393278396"
Nov 28 11:13:34 crc kubenswrapper[4923]: I1128 11:13:34.643566 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-lswhk" event={"ID":"bc404fb9-c265-4265-84e8-e3dd111fae9a","Type":"ContainerStarted","Data":"872d6898468b2c0649316ec85c5451bff29fe4aafd305b008db81c94839c2c4f"}
Nov 28 11:13:34 crc kubenswrapper[4923]: I1128 11:13:34.644715 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-lswhk"
Nov 28 11:13:34 crc kubenswrapper[4923]: I1128 11:13:34.648591 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-lswhk"
Nov 28 11:13:47 crc kubenswrapper[4923]: I1128 11:13:47.146388 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-7l2lz"]
Nov 28 11:13:47 crc kubenswrapper[4923]: I1128 11:13:47.147141 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-7l2lz" podUID="7a3aa8b5-2de7-476e-bc85-dbbe5281e8a8" containerName="controller-manager" containerID="cri-o://22071f7c5139b03a49bf4d4f5b55c17f93bd7c8cf3da175d3662380bc8dc5cd8" gracePeriod=30
Nov 28 11:13:47 crc kubenswrapper[4923]: I1128 11:13:47.241576 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-p7flx"]
Nov 28 11:13:47 crc kubenswrapper[4923]: I1128 11:13:47.241826 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-p7flx" podUID="cdedfd6e-9082-4411-b128-fc9806c67bd3" containerName="route-controller-manager" containerID="cri-o://72002c716bb9efd84e0ba650c8c77eb6ae7f82c4b4a8780b28653cb26db489d9" gracePeriod=30
Nov 28 11:13:47 crc kubenswrapper[4923]: I1128 11:13:47.532301 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-7l2lz"
Nov 28 11:13:47 crc kubenswrapper[4923]: I1128 11:13:47.600471 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-p7flx"
Nov 28 11:13:47 crc kubenswrapper[4923]: I1128 11:13:47.658904 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7a3aa8b5-2de7-476e-bc85-dbbe5281e8a8-config\") pod \"7a3aa8b5-2de7-476e-bc85-dbbe5281e8a8\" (UID: \"7a3aa8b5-2de7-476e-bc85-dbbe5281e8a8\") "
Nov 28 11:13:47 crc kubenswrapper[4923]: I1128 11:13:47.658972 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7a3aa8b5-2de7-476e-bc85-dbbe5281e8a8-serving-cert\") pod \"7a3aa8b5-2de7-476e-bc85-dbbe5281e8a8\" (UID: \"7a3aa8b5-2de7-476e-bc85-dbbe5281e8a8\") "
Nov 28 11:13:47 crc kubenswrapper[4923]: I1128 11:13:47.659039 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7a3aa8b5-2de7-476e-bc85-dbbe5281e8a8-proxy-ca-bundles\") pod \"7a3aa8b5-2de7-476e-bc85-dbbe5281e8a8\" (UID: \"7a3aa8b5-2de7-476e-bc85-dbbe5281e8a8\") "
Nov 28 11:13:47 crc kubenswrapper[4923]: I1128 11:13:47.659081 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7a3aa8b5-2de7-476e-bc85-dbbe5281e8a8-client-ca\") pod \"7a3aa8b5-2de7-476e-bc85-dbbe5281e8a8\" (UID: \"7a3aa8b5-2de7-476e-bc85-dbbe5281e8a8\") "
Nov 28 11:13:47 crc kubenswrapper[4923]: I1128 11:13:47.659145 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bbhbm\" (UniqueName: \"kubernetes.io/projected/7a3aa8b5-2de7-476e-bc85-dbbe5281e8a8-kube-api-access-bbhbm\") pod \"7a3aa8b5-2de7-476e-bc85-dbbe5281e8a8\" (UID: \"7a3aa8b5-2de7-476e-bc85-dbbe5281e8a8\") "
Nov 28 11:13:47 crc kubenswrapper[4923]: I1128 11:13:47.660054 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7a3aa8b5-2de7-476e-bc85-dbbe5281e8a8-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7a3aa8b5-2de7-476e-bc85-dbbe5281e8a8" (UID: "7a3aa8b5-2de7-476e-bc85-dbbe5281e8a8"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 11:13:47 crc kubenswrapper[4923]: I1128 11:13:47.660062 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7a3aa8b5-2de7-476e-bc85-dbbe5281e8a8-client-ca" (OuterVolumeSpecName: "client-ca") pod "7a3aa8b5-2de7-476e-bc85-dbbe5281e8a8" (UID: "7a3aa8b5-2de7-476e-bc85-dbbe5281e8a8"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 11:13:47 crc kubenswrapper[4923]: I1128 11:13:47.660188 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7a3aa8b5-2de7-476e-bc85-dbbe5281e8a8-config" (OuterVolumeSpecName: "config") pod "7a3aa8b5-2de7-476e-bc85-dbbe5281e8a8" (UID: "7a3aa8b5-2de7-476e-bc85-dbbe5281e8a8"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 11:13:47 crc kubenswrapper[4923]: I1128 11:13:47.664235 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7a3aa8b5-2de7-476e-bc85-dbbe5281e8a8-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7a3aa8b5-2de7-476e-bc85-dbbe5281e8a8" (UID: "7a3aa8b5-2de7-476e-bc85-dbbe5281e8a8"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 11:13:47 crc kubenswrapper[4923]: I1128 11:13:47.664267 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7a3aa8b5-2de7-476e-bc85-dbbe5281e8a8-kube-api-access-bbhbm" (OuterVolumeSpecName: "kube-api-access-bbhbm") pod "7a3aa8b5-2de7-476e-bc85-dbbe5281e8a8" (UID: "7a3aa8b5-2de7-476e-bc85-dbbe5281e8a8"). InnerVolumeSpecName "kube-api-access-bbhbm". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 11:13:47 crc kubenswrapper[4923]: I1128 11:13:47.742398 4923 generic.go:334] "Generic (PLEG): container finished" podID="7a3aa8b5-2de7-476e-bc85-dbbe5281e8a8" containerID="22071f7c5139b03a49bf4d4f5b55c17f93bd7c8cf3da175d3662380bc8dc5cd8" exitCode=0
Nov 28 11:13:47 crc kubenswrapper[4923]: I1128 11:13:47.742477 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-7l2lz" event={"ID":"7a3aa8b5-2de7-476e-bc85-dbbe5281e8a8","Type":"ContainerDied","Data":"22071f7c5139b03a49bf4d4f5b55c17f93bd7c8cf3da175d3662380bc8dc5cd8"}
Nov 28 11:13:47 crc kubenswrapper[4923]: I1128 11:13:47.742523 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-7l2lz" event={"ID":"7a3aa8b5-2de7-476e-bc85-dbbe5281e8a8","Type":"ContainerDied","Data":"64a7de2be060b261c9f4078a3e86033f251de75cc6a347b629874adb430fe0f8"}
Nov 28 11:13:47 crc kubenswrapper[4923]: I1128 11:13:47.742452 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-7l2lz"
Nov 28 11:13:47 crc kubenswrapper[4923]: I1128 11:13:47.742542 4923 scope.go:117] "RemoveContainer" containerID="22071f7c5139b03a49bf4d4f5b55c17f93bd7c8cf3da175d3662380bc8dc5cd8"
Nov 28 11:13:47 crc kubenswrapper[4923]: I1128 11:13:47.748064 4923 generic.go:334] "Generic (PLEG): container finished" podID="cdedfd6e-9082-4411-b128-fc9806c67bd3" containerID="72002c716bb9efd84e0ba650c8c77eb6ae7f82c4b4a8780b28653cb26db489d9" exitCode=0
Nov 28 11:13:47 crc kubenswrapper[4923]: I1128 11:13:47.748128 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-p7flx" event={"ID":"cdedfd6e-9082-4411-b128-fc9806c67bd3","Type":"ContainerDied","Data":"72002c716bb9efd84e0ba650c8c77eb6ae7f82c4b4a8780b28653cb26db489d9"}
Nov 28 11:13:47 crc kubenswrapper[4923]: I1128 11:13:47.748167 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-p7flx" event={"ID":"cdedfd6e-9082-4411-b128-fc9806c67bd3","Type":"ContainerDied","Data":"cc1eed32d6f09d5be859793239543114fee6e6010605998b1a7da3464b0f47e6"}
Nov 28 11:13:47 crc kubenswrapper[4923]: I1128 11:13:47.748239 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-p7flx"
Nov 28 11:13:47 crc kubenswrapper[4923]: I1128 11:13:47.760294 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cdedfd6e-9082-4411-b128-fc9806c67bd3-config\") pod \"cdedfd6e-9082-4411-b128-fc9806c67bd3\" (UID: \"cdedfd6e-9082-4411-b128-fc9806c67bd3\") "
Nov 28 11:13:47 crc kubenswrapper[4923]: I1128 11:13:47.760424 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cdedfd6e-9082-4411-b128-fc9806c67bd3-serving-cert\") pod \"cdedfd6e-9082-4411-b128-fc9806c67bd3\" (UID: \"cdedfd6e-9082-4411-b128-fc9806c67bd3\") "
Nov 28 11:13:47 crc kubenswrapper[4923]: I1128 11:13:47.760551 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/cdedfd6e-9082-4411-b128-fc9806c67bd3-client-ca\") pod \"cdedfd6e-9082-4411-b128-fc9806c67bd3\" (UID: \"cdedfd6e-9082-4411-b128-fc9806c67bd3\") "
Nov 28 11:13:47 crc kubenswrapper[4923]: I1128 11:13:47.760586 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rvb8s\" (UniqueName: \"kubernetes.io/projected/cdedfd6e-9082-4411-b128-fc9806c67bd3-kube-api-access-rvb8s\") pod \"cdedfd6e-9082-4411-b128-fc9806c67bd3\" (UID: \"cdedfd6e-9082-4411-b128-fc9806c67bd3\") "
Nov 28 11:13:47 crc kubenswrapper[4923]: I1128 11:13:47.760906 4923 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7a3aa8b5-2de7-476e-bc85-dbbe5281e8a8-client-ca\") on node \"crc\" DevicePath \"\""
Nov 28 11:13:47 crc kubenswrapper[4923]: I1128 11:13:47.760927 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bbhbm\" (UniqueName: \"kubernetes.io/projected/7a3aa8b5-2de7-476e-bc85-dbbe5281e8a8-kube-api-access-bbhbm\") on node \"crc\" DevicePath \"\""
Nov 28 11:13:47 crc kubenswrapper[4923]: I1128 11:13:47.760972 4923 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7a3aa8b5-2de7-476e-bc85-dbbe5281e8a8-config\") on node \"crc\" DevicePath \"\""
Nov 28 11:13:47 crc kubenswrapper[4923]: I1128 11:13:47.760989 4923 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7a3aa8b5-2de7-476e-bc85-dbbe5281e8a8-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 28 11:13:47 crc kubenswrapper[4923]: I1128 11:13:47.761009 4923 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7a3aa8b5-2de7-476e-bc85-dbbe5281e8a8-proxy-ca-bundles\") on node \"crc\" DevicePath \"\""
Nov 28 11:13:47 crc kubenswrapper[4923]: I1128 11:13:47.761216 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cdedfd6e-9082-4411-b128-fc9806c67bd3-config" (OuterVolumeSpecName: "config") pod "cdedfd6e-9082-4411-b128-fc9806c67bd3" (UID: "cdedfd6e-9082-4411-b128-fc9806c67bd3"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 11:13:47 crc kubenswrapper[4923]: I1128 11:13:47.762713 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cdedfd6e-9082-4411-b128-fc9806c67bd3-client-ca" (OuterVolumeSpecName: "client-ca") pod "cdedfd6e-9082-4411-b128-fc9806c67bd3" (UID: "cdedfd6e-9082-4411-b128-fc9806c67bd3"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 11:13:47 crc kubenswrapper[4923]: I1128 11:13:47.770394 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cdedfd6e-9082-4411-b128-fc9806c67bd3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "cdedfd6e-9082-4411-b128-fc9806c67bd3" (UID: "cdedfd6e-9082-4411-b128-fc9806c67bd3"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 11:13:47 crc kubenswrapper[4923]: I1128 11:13:47.771742 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cdedfd6e-9082-4411-b128-fc9806c67bd3-kube-api-access-rvb8s" (OuterVolumeSpecName: "kube-api-access-rvb8s") pod "cdedfd6e-9082-4411-b128-fc9806c67bd3" (UID: "cdedfd6e-9082-4411-b128-fc9806c67bd3"). InnerVolumeSpecName "kube-api-access-rvb8s". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 11:13:47 crc kubenswrapper[4923]: I1128 11:13:47.777136 4923 scope.go:117] "RemoveContainer" containerID="22071f7c5139b03a49bf4d4f5b55c17f93bd7c8cf3da175d3662380bc8dc5cd8"
Nov 28 11:13:47 crc kubenswrapper[4923]: E1128 11:13:47.777773 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"22071f7c5139b03a49bf4d4f5b55c17f93bd7c8cf3da175d3662380bc8dc5cd8\": container with ID starting with 22071f7c5139b03a49bf4d4f5b55c17f93bd7c8cf3da175d3662380bc8dc5cd8 not found: ID does not exist" containerID="22071f7c5139b03a49bf4d4f5b55c17f93bd7c8cf3da175d3662380bc8dc5cd8"
Nov 28 11:13:47 crc kubenswrapper[4923]: I1128 11:13:47.777810 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"22071f7c5139b03a49bf4d4f5b55c17f93bd7c8cf3da175d3662380bc8dc5cd8"} err="failed to get container status \"22071f7c5139b03a49bf4d4f5b55c17f93bd7c8cf3da175d3662380bc8dc5cd8\": rpc error: code = NotFound desc = could not find container \"22071f7c5139b03a49bf4d4f5b55c17f93bd7c8cf3da175d3662380bc8dc5cd8\": container with ID starting with 22071f7c5139b03a49bf4d4f5b55c17f93bd7c8cf3da175d3662380bc8dc5cd8 not found: ID does not exist"
Nov 28 11:13:47 crc kubenswrapper[4923]: I1128 11:13:47.777862 4923 scope.go:117] "RemoveContainer" containerID="72002c716bb9efd84e0ba650c8c77eb6ae7f82c4b4a8780b28653cb26db489d9"
Nov 28 11:13:47 crc kubenswrapper[4923]: I1128 11:13:47.788778 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-7l2lz"]
Nov 28 11:13:47 crc kubenswrapper[4923]: I1128 11:13:47.794865 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-7l2lz"]
Nov 28 11:13:47 crc kubenswrapper[4923]: I1128 11:13:47.798551 4923 scope.go:117] "RemoveContainer" containerID="72002c716bb9efd84e0ba650c8c77eb6ae7f82c4b4a8780b28653cb26db489d9"
Nov 28 11:13:47 crc kubenswrapper[4923]: E1128 11:13:47.799031 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"72002c716bb9efd84e0ba650c8c77eb6ae7f82c4b4a8780b28653cb26db489d9\": container with ID starting with 72002c716bb9efd84e0ba650c8c77eb6ae7f82c4b4a8780b28653cb26db489d9 not found: ID does not exist" containerID="72002c716bb9efd84e0ba650c8c77eb6ae7f82c4b4a8780b28653cb26db489d9"
Nov 28 11:13:47 crc kubenswrapper[4923]: I1128 11:13:47.799070 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"72002c716bb9efd84e0ba650c8c77eb6ae7f82c4b4a8780b28653cb26db489d9"} err="failed to get container status \"72002c716bb9efd84e0ba650c8c77eb6ae7f82c4b4a8780b28653cb26db489d9\": rpc error: code = NotFound desc = could not find container \"72002c716bb9efd84e0ba650c8c77eb6ae7f82c4b4a8780b28653cb26db489d9\": container with ID starting with 72002c716bb9efd84e0ba650c8c77eb6ae7f82c4b4a8780b28653cb26db489d9 not found: ID does not exist"
Nov 28 11:13:47 crc kubenswrapper[4923]: I1128 11:13:47.861971 4923 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/cdedfd6e-9082-4411-b128-fc9806c67bd3-client-ca\") on node \"crc\" DevicePath \"\""
Nov 28 11:13:47 crc kubenswrapper[4923]: I1128 11:13:47.862024 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rvb8s\" (UniqueName: \"kubernetes.io/projected/cdedfd6e-9082-4411-b128-fc9806c67bd3-kube-api-access-rvb8s\") on node \"crc\" DevicePath \"\""
Nov 28 11:13:47 crc kubenswrapper[4923]: I1128 11:13:47.862076 4923 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cdedfd6e-9082-4411-b128-fc9806c67bd3-config\") on node \"crc\" DevicePath \"\""
Nov 28 11:13:47 crc kubenswrapper[4923]: I1128 11:13:47.862096 4923 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cdedfd6e-9082-4411-b128-fc9806c67bd3-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 28 11:13:48 crc kubenswrapper[4923]: I1128 11:13:48.089495 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-p7flx"]
Nov 28 11:13:48 crc kubenswrapper[4923]: I1128 11:13:48.095873 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-p7flx"]
Nov 28 11:13:48 crc kubenswrapper[4923]: I1128 11:13:48.482979 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-f7fc999fd-26vbx"]
Nov 28 11:13:48 crc kubenswrapper[4923]: E1128 11:13:48.483311 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7a3aa8b5-2de7-476e-bc85-dbbe5281e8a8" containerName="controller-manager"
Nov 28 11:13:48 crc kubenswrapper[4923]: I1128 11:13:48.483336 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="7a3aa8b5-2de7-476e-bc85-dbbe5281e8a8" containerName="controller-manager"
Nov 28 11:13:48 crc kubenswrapper[4923]: E1128 11:13:48.483355 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor"
Nov 28 11:13:48 crc kubenswrapper[4923]: I1128 11:13:48.483367 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor"
Nov 28 11:13:48 crc kubenswrapper[4923]: E1128 11:13:48.483389 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cdedfd6e-9082-4411-b128-fc9806c67bd3" containerName="route-controller-manager"
Nov 28 11:13:48 crc kubenswrapper[4923]: I1128 11:13:48.483402 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="cdedfd6e-9082-4411-b128-fc9806c67bd3" containerName="route-controller-manager"
Nov 28 11:13:48 crc kubenswrapper[4923]: I1128 11:13:48.483579 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor"
Nov 28 11:13:48 crc kubenswrapper[4923]: I1128 11:13:48.483600 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="7a3aa8b5-2de7-476e-bc85-dbbe5281e8a8" containerName="controller-manager"
Nov 28 11:13:48 crc kubenswrapper[4923]: I1128 11:13:48.483616 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="cdedfd6e-9082-4411-b128-fc9806c67bd3" containerName="route-controller-manager"
Nov 28 11:13:48 crc kubenswrapper[4923]: I1128 11:13:48.484267 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-f7fc999fd-26vbx"
Nov 28 11:13:48 crc kubenswrapper[4923]: I1128 11:13:48.487049 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config"
Nov 28 11:13:48 crc kubenswrapper[4923]: I1128 11:13:48.487060 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt"
Nov 28 11:13:48 crc kubenswrapper[4923]: I1128 11:13:48.487113 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-59f4f64df4-kl4qv"]
Nov 28 11:13:48 crc kubenswrapper[4923]: I1128 11:13:48.487481 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt"
Nov 28 11:13:48 crc kubenswrapper[4923]: I1128 11:13:48.487516 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca"
Nov 28 11:13:48 crc kubenswrapper[4923]: I1128 11:13:48.487913 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-59f4f64df4-kl4qv"
Nov 28 11:13:48 crc kubenswrapper[4923]: I1128 11:13:48.488618 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert"
Nov 28 11:13:48 crc kubenswrapper[4923]: I1128 11:13:48.491126 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt"
Nov 28 11:13:48 crc kubenswrapper[4923]: I1128 11:13:48.491136 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config"
Nov 28 11:13:48 crc kubenswrapper[4923]: I1128 11:13:48.491798 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2"
Nov 28 11:13:48 crc kubenswrapper[4923]: I1128 11:13:48.491899 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca"
Nov 28 11:13:48 crc kubenswrapper[4923]: I1128 11:13:48.497254 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert"
Nov 28 11:13:48 crc kubenswrapper[4923]: I1128 11:13:48.497631 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt"
Nov 28 11:13:48 crc kubenswrapper[4923]: I1128 11:13:48.498565 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c"
Nov 28 11:13:48 crc kubenswrapper[4923]: I1128 11:13:48.505499 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca"
Nov 28 11:13:48 crc kubenswrapper[4923]: I1128 11:13:48.508794 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-f7fc999fd-26vbx"]
Nov 28 11:13:48 crc kubenswrapper[4923]: I1128 11:13:48.557175 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-59f4f64df4-kl4qv"]
Nov 28 11:13:48 crc kubenswrapper[4923]: I1128 11:13:48.682966 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/8e02b609-01b8-4c38-9bab-514d9e7cef68-proxy-ca-bundles\") pod \"controller-manager-f7fc999fd-26vbx\" (UID: \"8e02b609-01b8-4c38-9bab-514d9e7cef68\") " pod="openshift-controller-manager/controller-manager-f7fc999fd-26vbx"
Nov 28 11:13:48 crc kubenswrapper[4923]: I1128 11:13:48.683020 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hxzp2\" (UniqueName: \"kubernetes.io/projected/e449e370-3ce8-49ca-8aa2-72ef17c907ec-kube-api-access-hxzp2\") pod \"route-controller-manager-59f4f64df4-kl4qv\" (UID: \"e449e370-3ce8-49ca-8aa2-72ef17c907ec\") " pod="openshift-route-controller-manager/route-controller-manager-59f4f64df4-kl4qv"
Nov 28 11:13:48 crc kubenswrapper[4923]: I1128 11:13:48.683053 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e449e370-3ce8-49ca-8aa2-72ef17c907ec-serving-cert\") pod \"route-controller-manager-59f4f64df4-kl4qv\" (UID: \"e449e370-3ce8-49ca-8aa2-72ef17c907ec\") " pod="openshift-route-controller-manager/route-controller-manager-59f4f64df4-kl4qv"
Nov 28 11:13:48 crc kubenswrapper[4923]: I1128 11:13:48.683078 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8e02b609-01b8-4c38-9bab-514d9e7cef68-config\") pod \"controller-manager-f7fc999fd-26vbx\" (UID: \"8e02b609-01b8-4c38-9bab-514d9e7cef68\") " pod="openshift-controller-manager/controller-manager-f7fc999fd-26vbx"
Nov 28 11:13:48 crc kubenswrapper[4923]: I1128 11:13:48.683104 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kjs4d\" (UniqueName: \"kubernetes.io/projected/8e02b609-01b8-4c38-9bab-514d9e7cef68-kube-api-access-kjs4d\") pod \"controller-manager-f7fc999fd-26vbx\" (UID: \"8e02b609-01b8-4c38-9bab-514d9e7cef68\") " pod="openshift-controller-manager/controller-manager-f7fc999fd-26vbx"
Nov 28 11:13:48 crc kubenswrapper[4923]: I1128 11:13:48.683126 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/8e02b609-01b8-4c38-9bab-514d9e7cef68-client-ca\") pod \"controller-manager-f7fc999fd-26vbx\" (UID: \"8e02b609-01b8-4c38-9bab-514d9e7cef68\") " pod="openshift-controller-manager/controller-manager-f7fc999fd-26vbx"
Nov 28 11:13:48 crc kubenswrapper[4923]: I1128 11:13:48.683147 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8e02b609-01b8-4c38-9bab-514d9e7cef68-serving-cert\") pod \"controller-manager-f7fc999fd-26vbx\" (UID: \"8e02b609-01b8-4c38-9bab-514d9e7cef68\") " pod="openshift-controller-manager/controller-manager-f7fc999fd-26vbx"
Nov 28 11:13:48 crc kubenswrapper[4923]: I1128 11:13:48.683169 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e449e370-3ce8-49ca-8aa2-72ef17c907ec-client-ca\") pod \"route-controller-manager-59f4f64df4-kl4qv\" (UID: \"e449e370-3ce8-49ca-8aa2-72ef17c907ec\") " pod="openshift-route-controller-manager/route-controller-manager-59f4f64df4-kl4qv"
Nov 28 11:13:48 crc kubenswrapper[4923]: I1128 11:13:48.683199 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e449e370-3ce8-49ca-8aa2-72ef17c907ec-config\") pod \"route-controller-manager-59f4f64df4-kl4qv\" (UID: \"e449e370-3ce8-49ca-8aa2-72ef17c907ec\") " pod="openshift-route-controller-manager/route-controller-manager-59f4f64df4-kl4qv"
Nov 28 11:13:48 crc kubenswrapper[4923]: I1128 11:13:48.783508 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e449e370-3ce8-49ca-8aa2-72ef17c907ec-serving-cert\") pod \"route-controller-manager-59f4f64df4-kl4qv\" (UID: \"e449e370-3ce8-49ca-8aa2-72ef17c907ec\") " pod="openshift-route-controller-manager/route-controller-manager-59f4f64df4-kl4qv"
Nov 28 11:13:48 crc kubenswrapper[4923]: I1128 11:13:48.783772 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8e02b609-01b8-4c38-9bab-514d9e7cef68-config\") pod \"controller-manager-f7fc999fd-26vbx\" (UID: \"8e02b609-01b8-4c38-9bab-514d9e7cef68\") " pod="openshift-controller-manager/controller-manager-f7fc999fd-26vbx"
Nov 28 11:13:48 crc kubenswrapper[4923]: I1128 11:13:48.783923 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kjs4d\" (UniqueName: \"kubernetes.io/projected/8e02b609-01b8-4c38-9bab-514d9e7cef68-kube-api-access-kjs4d\") pod \"controller-manager-f7fc999fd-26vbx\" (UID: \"8e02b609-01b8-4c38-9bab-514d9e7cef68\") " pod="openshift-controller-manager/controller-manager-f7fc999fd-26vbx"
Nov 28 11:13:48 crc kubenswrapper[4923]: I1128 11:13:48.784065 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/8e02b609-01b8-4c38-9bab-514d9e7cef68-client-ca\") pod \"controller-manager-f7fc999fd-26vbx\" (UID: \"8e02b609-01b8-4c38-9bab-514d9e7cef68\") " pod="openshift-controller-manager/controller-manager-f7fc999fd-26vbx"
Nov 28 11:13:48 crc kubenswrapper[4923]: I1128 11:13:48.784164 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8e02b609-01b8-4c38-9bab-514d9e7cef68-serving-cert\") pod \"controller-manager-f7fc999fd-26vbx\" (UID: \"8e02b609-01b8-4c38-9bab-514d9e7cef68\") " pod="openshift-controller-manager/controller-manager-f7fc999fd-26vbx"
Nov 28 11:13:48 crc kubenswrapper[4923]: I1128 11:13:48.784276 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e449e370-3ce8-49ca-8aa2-72ef17c907ec-client-ca\") pod \"route-controller-manager-59f4f64df4-kl4qv\" (UID: \"e449e370-3ce8-49ca-8aa2-72ef17c907ec\") " pod="openshift-route-controller-manager/route-controller-manager-59f4f64df4-kl4qv"
Nov 28 11:13:48 crc kubenswrapper[4923]: I1128 11:13:48.784388 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e449e370-3ce8-49ca-8aa2-72ef17c907ec-config\") pod \"route-controller-manager-59f4f64df4-kl4qv\" (UID: \"e449e370-3ce8-49ca-8aa2-72ef17c907ec\") " pod="openshift-route-controller-manager/route-controller-manager-59f4f64df4-kl4qv"
Nov 28 11:13:48 crc kubenswrapper[4923]: I1128 11:13:48.784530 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/8e02b609-01b8-4c38-9bab-514d9e7cef68-proxy-ca-bundles\") pod \"controller-manager-f7fc999fd-26vbx\" (UID: \"8e02b609-01b8-4c38-9bab-514d9e7cef68\") " pod="openshift-controller-manager/controller-manager-f7fc999fd-26vbx"
Nov 28 11:13:48 crc kubenswrapper[4923]: I1128 11:13:48.784638 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hxzp2\" (UniqueName: \"kubernetes.io/projected/e449e370-3ce8-49ca-8aa2-72ef17c907ec-kube-api-access-hxzp2\") pod \"route-controller-manager-59f4f64df4-kl4qv\" (UID: \"e449e370-3ce8-49ca-8aa2-72ef17c907ec\") " pod="openshift-route-controller-manager/route-controller-manager-59f4f64df4-kl4qv"
Nov 28 11:13:48 crc kubenswrapper[4923]: I1128 11:13:48.785131 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/8e02b609-01b8-4c38-9bab-514d9e7cef68-client-ca\") pod \"controller-manager-f7fc999fd-26vbx\" (UID: \"8e02b609-01b8-4c38-9bab-514d9e7cef68\") " pod="openshift-controller-manager/controller-manager-f7fc999fd-26vbx"
Nov 28 11:13:48 crc kubenswrapper[4923]: I1128 11:13:48.785354 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e449e370-3ce8-49ca-8aa2-72ef17c907ec-config\") pod \"route-controller-manager-59f4f64df4-kl4qv\" (UID: \"e449e370-3ce8-49ca-8aa2-72ef17c907ec\") " pod="openshift-route-controller-manager/route-controller-manager-59f4f64df4-kl4qv"
Nov 28 11:13:48 crc kubenswrapper[4923]: I1128 11:13:48.785615 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e449e370-3ce8-49ca-8aa2-72ef17c907ec-client-ca\") pod \"route-controller-manager-59f4f64df4-kl4qv\" (UID: \"e449e370-3ce8-49ca-8aa2-72ef17c907ec\") " pod="openshift-route-controller-manager/route-controller-manager-59f4f64df4-kl4qv"
Nov 28 11:13:48 crc kubenswrapper[4923]: I1128 11:13:48.786182 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/8e02b609-01b8-4c38-9bab-514d9e7cef68-proxy-ca-bundles\") pod \"controller-manager-f7fc999fd-26vbx\" (UID: \"8e02b609-01b8-4c38-9bab-514d9e7cef68\") " pod="openshift-controller-manager/controller-manager-f7fc999fd-26vbx"
Nov 28 11:13:48 crc kubenswrapper[4923]: I1128 11:13:48.788791 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8e02b609-01b8-4c38-9bab-514d9e7cef68-config\") pod \"controller-manager-f7fc999fd-26vbx\" (UID: \"8e02b609-01b8-4c38-9bab-514d9e7cef68\") " pod="openshift-controller-manager/controller-manager-f7fc999fd-26vbx"
Nov 28 11:13:48 crc kubenswrapper[4923]: I1128 11:13:48.790430 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8e02b609-01b8-4c38-9bab-514d9e7cef68-serving-cert\") pod \"controller-manager-f7fc999fd-26vbx\" (UID: \"8e02b609-01b8-4c38-9bab-514d9e7cef68\") " pod="openshift-controller-manager/controller-manager-f7fc999fd-26vbx"
Nov 28 11:13:48 crc kubenswrapper[4923]: I1128 11:13:48.794593 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e449e370-3ce8-49ca-8aa2-72ef17c907ec-serving-cert\") pod \"route-controller-manager-59f4f64df4-kl4qv\" (UID: \"e449e370-3ce8-49ca-8aa2-72ef17c907ec\") " pod="openshift-route-controller-manager/route-controller-manager-59f4f64df4-kl4qv"
Nov 28 11:13:48 crc kubenswrapper[4923]: I1128 11:13:48.813626 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kjs4d\" (UniqueName: \"kubernetes.io/projected/8e02b609-01b8-4c38-9bab-514d9e7cef68-kube-api-access-kjs4d\") pod \"controller-manager-f7fc999fd-26vbx\" (UID: \"8e02b609-01b8-4c38-9bab-514d9e7cef68\") " pod="openshift-controller-manager/controller-manager-f7fc999fd-26vbx"
Nov 28 11:13:48 crc kubenswrapper[4923]: I1128 11:13:48.824515 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hxzp2\" (UniqueName: \"kubernetes.io/projected/e449e370-3ce8-49ca-8aa2-72ef17c907ec-kube-api-access-hxzp2\") pod \"route-controller-manager-59f4f64df4-kl4qv\" (UID: \"e449e370-3ce8-49ca-8aa2-72ef17c907ec\") " pod="openshift-route-controller-manager/route-controller-manager-59f4f64df4-kl4qv"
Nov 28 11:13:49 crc kubenswrapper[4923]: I1128 11:13:49.098481 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-f7fc999fd-26vbx"
Nov 28 11:13:49 crc kubenswrapper[4923]: I1128 11:13:49.108162 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-59f4f64df4-kl4qv"
Nov 28 11:13:49 crc kubenswrapper[4923]: I1128 11:13:49.180408 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7a3aa8b5-2de7-476e-bc85-dbbe5281e8a8" path="/var/lib/kubelet/pods/7a3aa8b5-2de7-476e-bc85-dbbe5281e8a8/volumes"
Nov 28 11:13:49 crc kubenswrapper[4923]: I1128 11:13:49.181615 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cdedfd6e-9082-4411-b128-fc9806c67bd3" path="/var/lib/kubelet/pods/cdedfd6e-9082-4411-b128-fc9806c67bd3/volumes"
Nov 28 11:13:49 crc kubenswrapper[4923]: I1128 11:13:49.396458 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-f7fc999fd-26vbx"]
Nov 28 11:13:49 crc kubenswrapper[4923]: I1128 11:13:49.437115 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-59f4f64df4-kl4qv"]
Nov 28 11:13:49 crc kubenswrapper[4923]: W1128 11:13:49.442156 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode449e370_3ce8_49ca_8aa2_72ef17c907ec.slice/crio-aa52fc3f423c025d47b335925a86e11b3b289149edac7a356375081c9ee17843 WatchSource:0}: Error finding container aa52fc3f423c025d47b335925a86e11b3b289149edac7a356375081c9ee17843: Status 404 returned error can't find the container with id aa52fc3f423c025d47b335925a86e11b3b289149edac7a356375081c9ee17843
Nov 28 11:13:49 crc kubenswrapper[4923]: I1128 11:13:49.761550 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-59f4f64df4-kl4qv" event={"ID":"e449e370-3ce8-49ca-8aa2-72ef17c907ec","Type":"ContainerStarted","Data":"b62d35f9d080bac62f7b9c918d897d21d70c205ef1c13fa35e8023a0dce3e46d"}
Nov 28 11:13:49 crc kubenswrapper[4923]: I1128 11:13:49.761608 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-59f4f64df4-kl4qv" event={"ID":"e449e370-3ce8-49ca-8aa2-72ef17c907ec","Type":"ContainerStarted","Data":"aa52fc3f423c025d47b335925a86e11b3b289149edac7a356375081c9ee17843"}
Nov 28 11:13:49 crc kubenswrapper[4923]: I1128 11:13:49.762887 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-59f4f64df4-kl4qv"
Nov 28 11:13:49 crc kubenswrapper[4923]: I1128 11:13:49.766474 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-f7fc999fd-26vbx" event={"ID":"8e02b609-01b8-4c38-9bab-514d9e7cef68","Type":"ContainerStarted","Data":"8bb0cf563ec1ac56eb78b130fb056d8f4bb760280d430a9ef7b7fabf2b5be918"}
Nov 28 11:13:49 crc kubenswrapper[4923]: I1128 11:13:49.766542 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-f7fc999fd-26vbx" event={"ID":"8e02b609-01b8-4c38-9bab-514d9e7cef68","Type":"ContainerStarted","Data":"9404a79e9879065670c0a240ccd39ae75d3cb750da8f36e1f10e09999aea225a"}
Nov 28 11:13:49 crc kubenswrapper[4923]: I1128 11:13:49.766766 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-f7fc999fd-26vbx"
Nov 28 11:13:49 crc kubenswrapper[4923]: I1128 11:13:49.786825 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-f7fc999fd-26vbx"
Nov 28 11:13:49 crc kubenswrapper[4923]: I1128 11:13:49.803329 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-59f4f64df4-kl4qv" podStartSLOduration=2.803315401 podStartE2EDuration="2.803315401s" podCreationTimestamp="2025-11-28 11:13:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:13:49.798208155 +0000 UTC m=+308.926892365" watchObservedRunningTime="2025-11-28 11:13:49.803315401 +0000 UTC m=+308.931999621"
Nov 28 11:13:49 crc kubenswrapper[4923]: I1128 11:13:49.803855 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-59f4f64df4-kl4qv"
Nov 28 11:13:49 crc kubenswrapper[4923]: I1128 11:13:49.822392 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-f7fc999fd-26vbx" podStartSLOduration=2.822375895 podStartE2EDuration="2.822375895s" podCreationTimestamp="2025-11-28 11:13:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:13:49.820378788 +0000 UTC m=+308.949063008" watchObservedRunningTime="2025-11-28 11:13:49.822375895 +0000 UTC m=+308.951060105"
Nov 28 11:14:07 crc kubenswrapper[4923]: I1128 11:14:07.098467 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-f7fc999fd-26vbx"]
Nov 28 11:14:07 crc kubenswrapper[4923]: I1128 11:14:07.099653 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-f7fc999fd-26vbx" podUID="8e02b609-01b8-4c38-9bab-514d9e7cef68" containerName="controller-manager" containerID="cri-o://8bb0cf563ec1ac56eb78b130fb056d8f4bb760280d430a9ef7b7fabf2b5be918" gracePeriod=30
Nov 28 11:14:07 crc kubenswrapper[4923]: I1128 11:14:07.670247 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-f7fc999fd-26vbx"
Nov 28 11:14:07 crc kubenswrapper[4923]: I1128 11:14:07.766711 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kjs4d\" (UniqueName: \"kubernetes.io/projected/8e02b609-01b8-4c38-9bab-514d9e7cef68-kube-api-access-kjs4d\") pod \"8e02b609-01b8-4c38-9bab-514d9e7cef68\" (UID: \"8e02b609-01b8-4c38-9bab-514d9e7cef68\") "
Nov 28 11:14:07 crc kubenswrapper[4923]: I1128 11:14:07.766813 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/8e02b609-01b8-4c38-9bab-514d9e7cef68-client-ca\") pod \"8e02b609-01b8-4c38-9bab-514d9e7cef68\" (UID: \"8e02b609-01b8-4c38-9bab-514d9e7cef68\") "
Nov 28 11:14:07 crc kubenswrapper[4923]: I1128 11:14:07.766869 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/8e02b609-01b8-4c38-9bab-514d9e7cef68-proxy-ca-bundles\") pod \"8e02b609-01b8-4c38-9bab-514d9e7cef68\" (UID: \"8e02b609-01b8-4c38-9bab-514d9e7cef68\") "
Nov 28 11:14:07 crc kubenswrapper[4923]: I1128 11:14:07.766914 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8e02b609-01b8-4c38-9bab-514d9e7cef68-config\") pod \"8e02b609-01b8-4c38-9bab-514d9e7cef68\" (UID: \"8e02b609-01b8-4c38-9bab-514d9e7cef68\") "
Nov 28 11:14:07 crc kubenswrapper[4923]: I1128 11:14:07.767013 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8e02b609-01b8-4c38-9bab-514d9e7cef68-serving-cert\") pod \"8e02b609-01b8-4c38-9bab-514d9e7cef68\" (UID: \"8e02b609-01b8-4c38-9bab-514d9e7cef68\") "
Nov 28 11:14:07 crc kubenswrapper[4923]: I1128 11:14:07.768694 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8e02b609-01b8-4c38-9bab-514d9e7cef68-config" (OuterVolumeSpecName: "config") pod "8e02b609-01b8-4c38-9bab-514d9e7cef68" (UID: "8e02b609-01b8-4c38-9bab-514d9e7cef68"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 11:14:07 crc kubenswrapper[4923]: I1128 11:14:07.768703 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8e02b609-01b8-4c38-9bab-514d9e7cef68-client-ca" (OuterVolumeSpecName: "client-ca") pod "8e02b609-01b8-4c38-9bab-514d9e7cef68" (UID: "8e02b609-01b8-4c38-9bab-514d9e7cef68"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 11:14:07 crc kubenswrapper[4923]: I1128 11:14:07.769095 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8e02b609-01b8-4c38-9bab-514d9e7cef68-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "8e02b609-01b8-4c38-9bab-514d9e7cef68" (UID: "8e02b609-01b8-4c38-9bab-514d9e7cef68"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 11:14:07 crc kubenswrapper[4923]: I1128 11:14:07.777368 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8e02b609-01b8-4c38-9bab-514d9e7cef68-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8e02b609-01b8-4c38-9bab-514d9e7cef68" (UID: "8e02b609-01b8-4c38-9bab-514d9e7cef68"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 11:14:07 crc kubenswrapper[4923]: I1128 11:14:07.779096 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8e02b609-01b8-4c38-9bab-514d9e7cef68-kube-api-access-kjs4d" (OuterVolumeSpecName: "kube-api-access-kjs4d") pod "8e02b609-01b8-4c38-9bab-514d9e7cef68" (UID: "8e02b609-01b8-4c38-9bab-514d9e7cef68"). InnerVolumeSpecName "kube-api-access-kjs4d". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 11:14:07 crc kubenswrapper[4923]: I1128 11:14:07.869404 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kjs4d\" (UniqueName: \"kubernetes.io/projected/8e02b609-01b8-4c38-9bab-514d9e7cef68-kube-api-access-kjs4d\") on node \"crc\" DevicePath \"\""
Nov 28 11:14:07 crc kubenswrapper[4923]: I1128 11:14:07.869464 4923 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/8e02b609-01b8-4c38-9bab-514d9e7cef68-client-ca\") on node \"crc\" DevicePath \"\""
Nov 28 11:14:07 crc kubenswrapper[4923]: I1128 11:14:07.869488 4923 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/8e02b609-01b8-4c38-9bab-514d9e7cef68-proxy-ca-bundles\") on node \"crc\" DevicePath \"\""
Nov 28 11:14:07 crc kubenswrapper[4923]: I1128 11:14:07.869511 4923 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8e02b609-01b8-4c38-9bab-514d9e7cef68-config\") on node \"crc\" DevicePath \"\""
Nov 28 11:14:07 crc kubenswrapper[4923]: I1128 11:14:07.869534 4923 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8e02b609-01b8-4c38-9bab-514d9e7cef68-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 28 11:14:07 crc kubenswrapper[4923]: I1128 11:14:07.888571 4923 generic.go:334] "Generic (PLEG): container finished" podID="8e02b609-01b8-4c38-9bab-514d9e7cef68" containerID="8bb0cf563ec1ac56eb78b130fb056d8f4bb760280d430a9ef7b7fabf2b5be918" exitCode=0
Nov 28 11:14:07 crc kubenswrapper[4923]: I1128 11:14:07.888605 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-f7fc999fd-26vbx" event={"ID":"8e02b609-01b8-4c38-9bab-514d9e7cef68","Type":"ContainerDied","Data":"8bb0cf563ec1ac56eb78b130fb056d8f4bb760280d430a9ef7b7fabf2b5be918"}
Nov 28 11:14:07 crc kubenswrapper[4923]: I1128 11:14:07.888633 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-f7fc999fd-26vbx" event={"ID":"8e02b609-01b8-4c38-9bab-514d9e7cef68","Type":"ContainerDied","Data":"9404a79e9879065670c0a240ccd39ae75d3cb750da8f36e1f10e09999aea225a"}
Nov 28 11:14:07 crc kubenswrapper[4923]: I1128 11:14:07.888684 4923 scope.go:117] "RemoveContainer" containerID="8bb0cf563ec1ac56eb78b130fb056d8f4bb760280d430a9ef7b7fabf2b5be918"
Nov 28 11:14:07 crc kubenswrapper[4923]: I1128 11:14:07.888669 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-f7fc999fd-26vbx"
Nov 28 11:14:07 crc kubenswrapper[4923]: I1128 11:14:07.923789 4923 scope.go:117] "RemoveContainer" containerID="8bb0cf563ec1ac56eb78b130fb056d8f4bb760280d430a9ef7b7fabf2b5be918"
Nov 28 11:14:07 crc kubenswrapper[4923]: E1128 11:14:07.924455 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8bb0cf563ec1ac56eb78b130fb056d8f4bb760280d430a9ef7b7fabf2b5be918\": container with ID starting with 8bb0cf563ec1ac56eb78b130fb056d8f4bb760280d430a9ef7b7fabf2b5be918 not found: ID does not exist" containerID="8bb0cf563ec1ac56eb78b130fb056d8f4bb760280d430a9ef7b7fabf2b5be918"
Nov 28 11:14:07 crc kubenswrapper[4923]: I1128 11:14:07.924500 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8bb0cf563ec1ac56eb78b130fb056d8f4bb760280d430a9ef7b7fabf2b5be918"} err="failed to get container status \"8bb0cf563ec1ac56eb78b130fb056d8f4bb760280d430a9ef7b7fabf2b5be918\": rpc error: code = NotFound desc = could not find container \"8bb0cf563ec1ac56eb78b130fb056d8f4bb760280d430a9ef7b7fabf2b5be918\": container with ID starting with 8bb0cf563ec1ac56eb78b130fb056d8f4bb760280d430a9ef7b7fabf2b5be918 not found: ID does not exist"
Nov 28 11:14:07 crc kubenswrapper[4923]: I1128 11:14:07.934643 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-f7fc999fd-26vbx"]
Nov 28 11:14:07 crc kubenswrapper[4923]: I1128 11:14:07.962749 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-f7fc999fd-26vbx"]
Nov 28 11:14:08 crc kubenswrapper[4923]: I1128 11:14:08.495492 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-6b9c4ffdc6-f7k2m"]
Nov 28 11:14:08 crc kubenswrapper[4923]: E1128 11:14:08.497249 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e02b609-01b8-4c38-9bab-514d9e7cef68" containerName="controller-manager"
Nov 28 11:14:08 crc kubenswrapper[4923]: I1128 11:14:08.497426 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e02b609-01b8-4c38-9bab-514d9e7cef68" containerName="controller-manager"
Nov 28 11:14:08 crc kubenswrapper[4923]: I1128 11:14:08.497745 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="8e02b609-01b8-4c38-9bab-514d9e7cef68" containerName="controller-manager"
Nov 28 11:14:08 crc kubenswrapper[4923]: I1128 11:14:08.498502 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6b9c4ffdc6-f7k2m"
Nov 28 11:14:08 crc kubenswrapper[4923]: I1128 11:14:08.500508 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt"
Nov 28 11:14:08 crc kubenswrapper[4923]: I1128 11:14:08.500812 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert"
Nov 28 11:14:08 crc kubenswrapper[4923]: I1128 11:14:08.502147 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt"
Nov 28 11:14:08 crc kubenswrapper[4923]: I1128 11:14:08.502438 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca"
Nov 28 11:14:08 crc kubenswrapper[4923]: I1128 11:14:08.503650 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c"
Nov 28 11:14:08 crc kubenswrapper[4923]: I1128 11:14:08.505089 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config"
Nov 28 11:14:08 crc kubenswrapper[4923]: I1128 11:14:08.513360 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca"
Nov 28 11:14:08 crc kubenswrapper[4923]: I1128 11:14:08.518441 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-6b9c4ffdc6-f7k2m"]
Nov 28 11:14:08 crc kubenswrapper[4923]: I1128 11:14:08.680595 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/690e12f7-a146-4c9f-ab44-ab2b7539ac35-proxy-ca-bundles\") pod \"controller-manager-6b9c4ffdc6-f7k2m\" (UID: \"690e12f7-a146-4c9f-ab44-ab2b7539ac35\") " pod="openshift-controller-manager/controller-manager-6b9c4ffdc6-f7k2m"
Nov 28 11:14:08 crc kubenswrapper[4923]: I1128 11:14:08.680657 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/690e12f7-a146-4c9f-ab44-ab2b7539ac35-config\") pod \"controller-manager-6b9c4ffdc6-f7k2m\" (UID: \"690e12f7-a146-4c9f-ab44-ab2b7539ac35\") " pod="openshift-controller-manager/controller-manager-6b9c4ffdc6-f7k2m"
Nov 28 11:14:08 crc kubenswrapper[4923]: I1128 11:14:08.680702 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9j9h8\" (UniqueName: \"kubernetes.io/projected/690e12f7-a146-4c9f-ab44-ab2b7539ac35-kube-api-access-9j9h8\") pod \"controller-manager-6b9c4ffdc6-f7k2m\" (UID: \"690e12f7-a146-4c9f-ab44-ab2b7539ac35\") " pod="openshift-controller-manager/controller-manager-6b9c4ffdc6-f7k2m"
Nov 28 11:14:08 crc kubenswrapper[4923]: I1128 11:14:08.680772 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/690e12f7-a146-4c9f-ab44-ab2b7539ac35-client-ca\") pod \"controller-manager-6b9c4ffdc6-f7k2m\" (UID: \"690e12f7-a146-4c9f-ab44-ab2b7539ac35\") " pod="openshift-controller-manager/controller-manager-6b9c4ffdc6-f7k2m"
Nov 28 11:14:08 crc kubenswrapper[4923]: I1128 11:14:08.687148 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName:
\"kubernetes.io/secret/690e12f7-a146-4c9f-ab44-ab2b7539ac35-serving-cert\") pod \"controller-manager-6b9c4ffdc6-f7k2m\" (UID: \"690e12f7-a146-4c9f-ab44-ab2b7539ac35\") " pod="openshift-controller-manager/controller-manager-6b9c4ffdc6-f7k2m" Nov 28 11:14:08 crc kubenswrapper[4923]: I1128 11:14:08.789319 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/690e12f7-a146-4c9f-ab44-ab2b7539ac35-client-ca\") pod \"controller-manager-6b9c4ffdc6-f7k2m\" (UID: \"690e12f7-a146-4c9f-ab44-ab2b7539ac35\") " pod="openshift-controller-manager/controller-manager-6b9c4ffdc6-f7k2m" Nov 28 11:14:08 crc kubenswrapper[4923]: I1128 11:14:08.789373 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/690e12f7-a146-4c9f-ab44-ab2b7539ac35-serving-cert\") pod \"controller-manager-6b9c4ffdc6-f7k2m\" (UID: \"690e12f7-a146-4c9f-ab44-ab2b7539ac35\") " pod="openshift-controller-manager/controller-manager-6b9c4ffdc6-f7k2m" Nov 28 11:14:08 crc kubenswrapper[4923]: I1128 11:14:08.789413 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/690e12f7-a146-4c9f-ab44-ab2b7539ac35-proxy-ca-bundles\") pod \"controller-manager-6b9c4ffdc6-f7k2m\" (UID: \"690e12f7-a146-4c9f-ab44-ab2b7539ac35\") " pod="openshift-controller-manager/controller-manager-6b9c4ffdc6-f7k2m" Nov 28 11:14:08 crc kubenswrapper[4923]: I1128 11:14:08.789444 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/690e12f7-a146-4c9f-ab44-ab2b7539ac35-config\") pod \"controller-manager-6b9c4ffdc6-f7k2m\" (UID: \"690e12f7-a146-4c9f-ab44-ab2b7539ac35\") " pod="openshift-controller-manager/controller-manager-6b9c4ffdc6-f7k2m" Nov 28 11:14:08 crc kubenswrapper[4923]: I1128 11:14:08.789478 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9j9h8\" (UniqueName: \"kubernetes.io/projected/690e12f7-a146-4c9f-ab44-ab2b7539ac35-kube-api-access-9j9h8\") pod \"controller-manager-6b9c4ffdc6-f7k2m\" (UID: \"690e12f7-a146-4c9f-ab44-ab2b7539ac35\") " pod="openshift-controller-manager/controller-manager-6b9c4ffdc6-f7k2m" Nov 28 11:14:08 crc kubenswrapper[4923]: I1128 11:14:08.791361 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/690e12f7-a146-4c9f-ab44-ab2b7539ac35-proxy-ca-bundles\") pod \"controller-manager-6b9c4ffdc6-f7k2m\" (UID: \"690e12f7-a146-4c9f-ab44-ab2b7539ac35\") " pod="openshift-controller-manager/controller-manager-6b9c4ffdc6-f7k2m" Nov 28 11:14:08 crc kubenswrapper[4923]: I1128 11:14:08.791600 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/690e12f7-a146-4c9f-ab44-ab2b7539ac35-client-ca\") pod \"controller-manager-6b9c4ffdc6-f7k2m\" (UID: \"690e12f7-a146-4c9f-ab44-ab2b7539ac35\") " pod="openshift-controller-manager/controller-manager-6b9c4ffdc6-f7k2m" Nov 28 11:14:08 crc kubenswrapper[4923]: I1128 11:14:08.793040 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/690e12f7-a146-4c9f-ab44-ab2b7539ac35-config\") pod \"controller-manager-6b9c4ffdc6-f7k2m\" (UID: \"690e12f7-a146-4c9f-ab44-ab2b7539ac35\") " 
pod="openshift-controller-manager/controller-manager-6b9c4ffdc6-f7k2m" Nov 28 11:14:08 crc kubenswrapper[4923]: I1128 11:14:08.810581 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/690e12f7-a146-4c9f-ab44-ab2b7539ac35-serving-cert\") pod \"controller-manager-6b9c4ffdc6-f7k2m\" (UID: \"690e12f7-a146-4c9f-ab44-ab2b7539ac35\") " pod="openshift-controller-manager/controller-manager-6b9c4ffdc6-f7k2m" Nov 28 11:14:08 crc kubenswrapper[4923]: I1128 11:14:08.824492 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9j9h8\" (UniqueName: \"kubernetes.io/projected/690e12f7-a146-4c9f-ab44-ab2b7539ac35-kube-api-access-9j9h8\") pod \"controller-manager-6b9c4ffdc6-f7k2m\" (UID: \"690e12f7-a146-4c9f-ab44-ab2b7539ac35\") " pod="openshift-controller-manager/controller-manager-6b9c4ffdc6-f7k2m" Nov 28 11:14:08 crc kubenswrapper[4923]: I1128 11:14:08.856379 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6b9c4ffdc6-f7k2m" Nov 28 11:14:09 crc kubenswrapper[4923]: I1128 11:14:09.119599 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-6b9c4ffdc6-f7k2m"] Nov 28 11:14:09 crc kubenswrapper[4923]: I1128 11:14:09.178014 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8e02b609-01b8-4c38-9bab-514d9e7cef68" path="/var/lib/kubelet/pods/8e02b609-01b8-4c38-9bab-514d9e7cef68/volumes" Nov 28 11:14:09 crc kubenswrapper[4923]: I1128 11:14:09.908383 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6b9c4ffdc6-f7k2m" event={"ID":"690e12f7-a146-4c9f-ab44-ab2b7539ac35","Type":"ContainerStarted","Data":"6ad72afccc53fc996934b9ab3b7630ead6aff1f09cdb4780e73c5e28158a6431"} Nov 28 11:14:09 crc kubenswrapper[4923]: I1128 11:14:09.908826 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-6b9c4ffdc6-f7k2m" Nov 28 11:14:09 crc kubenswrapper[4923]: I1128 11:14:09.908849 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6b9c4ffdc6-f7k2m" event={"ID":"690e12f7-a146-4c9f-ab44-ab2b7539ac35","Type":"ContainerStarted","Data":"c78e2250a54142aee466a3176e0f1015e0bf853116fdd26e7431f28860d52790"} Nov 28 11:14:09 crc kubenswrapper[4923]: I1128 11:14:09.914997 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-6b9c4ffdc6-f7k2m" Nov 28 11:14:09 crc kubenswrapper[4923]: I1128 11:14:09.939984 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-6b9c4ffdc6-f7k2m" podStartSLOduration=2.939967581 podStartE2EDuration="2.939967581s" podCreationTimestamp="2025-11-28 11:14:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:14:09.936170593 +0000 UTC m=+329.064854813" watchObservedRunningTime="2025-11-28 11:14:09.939967581 +0000 UTC m=+329.068651801" Nov 28 11:14:27 crc kubenswrapper[4923]: I1128 11:14:27.124262 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-mkn9p"] Nov 28 11:14:27 crc kubenswrapper[4923]: I1128 11:14:27.125479 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-mkn9p" Nov 28 11:14:27 crc kubenswrapper[4923]: I1128 11:14:27.139086 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-mkn9p"] Nov 28 11:14:27 crc kubenswrapper[4923]: I1128 11:14:27.215162 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8ad66a3e-89dd-497f-903d-bc0e65e51d18-registry-tls\") pod \"image-registry-66df7c8f76-mkn9p\" (UID: \"8ad66a3e-89dd-497f-903d-bc0e65e51d18\") " pod="openshift-image-registry/image-registry-66df7c8f76-mkn9p" Nov 28 11:14:27 crc kubenswrapper[4923]: I1128 11:14:27.215427 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8ad66a3e-89dd-497f-903d-bc0e65e51d18-bound-sa-token\") pod \"image-registry-66df7c8f76-mkn9p\" (UID: \"8ad66a3e-89dd-497f-903d-bc0e65e51d18\") " pod="openshift-image-registry/image-registry-66df7c8f76-mkn9p" Nov 28 11:14:27 crc kubenswrapper[4923]: I1128 11:14:27.215459 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8ad66a3e-89dd-497f-903d-bc0e65e51d18-ca-trust-extracted\") pod \"image-registry-66df7c8f76-mkn9p\" (UID: \"8ad66a3e-89dd-497f-903d-bc0e65e51d18\") " pod="openshift-image-registry/image-registry-66df7c8f76-mkn9p" Nov 28 11:14:27 crc kubenswrapper[4923]: I1128 11:14:27.215493 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8ad66a3e-89dd-497f-903d-bc0e65e51d18-installation-pull-secrets\") pod \"image-registry-66df7c8f76-mkn9p\" (UID: \"8ad66a3e-89dd-497f-903d-bc0e65e51d18\") " pod="openshift-image-registry/image-registry-66df7c8f76-mkn9p" Nov 28 11:14:27 crc kubenswrapper[4923]: I1128 11:14:27.215528 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5lzjz\" (UniqueName: \"kubernetes.io/projected/8ad66a3e-89dd-497f-903d-bc0e65e51d18-kube-api-access-5lzjz\") pod \"image-registry-66df7c8f76-mkn9p\" (UID: \"8ad66a3e-89dd-497f-903d-bc0e65e51d18\") " pod="openshift-image-registry/image-registry-66df7c8f76-mkn9p" Nov 28 11:14:27 crc kubenswrapper[4923]: I1128 11:14:27.215551 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8ad66a3e-89dd-497f-903d-bc0e65e51d18-registry-certificates\") pod \"image-registry-66df7c8f76-mkn9p\" (UID: \"8ad66a3e-89dd-497f-903d-bc0e65e51d18\") " pod="openshift-image-registry/image-registry-66df7c8f76-mkn9p" Nov 28 11:14:27 crc kubenswrapper[4923]: I1128 11:14:27.215576 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-mkn9p\" (UID: \"8ad66a3e-89dd-497f-903d-bc0e65e51d18\") " pod="openshift-image-registry/image-registry-66df7c8f76-mkn9p" Nov 28 11:14:27 crc kubenswrapper[4923]: I1128 11:14:27.215604 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: 
\"kubernetes.io/configmap/8ad66a3e-89dd-497f-903d-bc0e65e51d18-trusted-ca\") pod \"image-registry-66df7c8f76-mkn9p\" (UID: \"8ad66a3e-89dd-497f-903d-bc0e65e51d18\") " pod="openshift-image-registry/image-registry-66df7c8f76-mkn9p" Nov 28 11:14:27 crc kubenswrapper[4923]: I1128 11:14:27.263490 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-mkn9p\" (UID: \"8ad66a3e-89dd-497f-903d-bc0e65e51d18\") " pod="openshift-image-registry/image-registry-66df7c8f76-mkn9p" Nov 28 11:14:27 crc kubenswrapper[4923]: I1128 11:14:27.317498 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8ad66a3e-89dd-497f-903d-bc0e65e51d18-trusted-ca\") pod \"image-registry-66df7c8f76-mkn9p\" (UID: \"8ad66a3e-89dd-497f-903d-bc0e65e51d18\") " pod="openshift-image-registry/image-registry-66df7c8f76-mkn9p" Nov 28 11:14:27 crc kubenswrapper[4923]: I1128 11:14:27.317674 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8ad66a3e-89dd-497f-903d-bc0e65e51d18-registry-tls\") pod \"image-registry-66df7c8f76-mkn9p\" (UID: \"8ad66a3e-89dd-497f-903d-bc0e65e51d18\") " pod="openshift-image-registry/image-registry-66df7c8f76-mkn9p" Nov 28 11:14:27 crc kubenswrapper[4923]: I1128 11:14:27.317724 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8ad66a3e-89dd-497f-903d-bc0e65e51d18-bound-sa-token\") pod \"image-registry-66df7c8f76-mkn9p\" (UID: \"8ad66a3e-89dd-497f-903d-bc0e65e51d18\") " pod="openshift-image-registry/image-registry-66df7c8f76-mkn9p" Nov 28 11:14:27 crc kubenswrapper[4923]: I1128 11:14:27.317813 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8ad66a3e-89dd-497f-903d-bc0e65e51d18-ca-trust-extracted\") pod \"image-registry-66df7c8f76-mkn9p\" (UID: \"8ad66a3e-89dd-497f-903d-bc0e65e51d18\") " pod="openshift-image-registry/image-registry-66df7c8f76-mkn9p" Nov 28 11:14:27 crc kubenswrapper[4923]: I1128 11:14:27.317875 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8ad66a3e-89dd-497f-903d-bc0e65e51d18-installation-pull-secrets\") pod \"image-registry-66df7c8f76-mkn9p\" (UID: \"8ad66a3e-89dd-497f-903d-bc0e65e51d18\") " pod="openshift-image-registry/image-registry-66df7c8f76-mkn9p" Nov 28 11:14:27 crc kubenswrapper[4923]: I1128 11:14:27.317970 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5lzjz\" (UniqueName: \"kubernetes.io/projected/8ad66a3e-89dd-497f-903d-bc0e65e51d18-kube-api-access-5lzjz\") pod \"image-registry-66df7c8f76-mkn9p\" (UID: \"8ad66a3e-89dd-497f-903d-bc0e65e51d18\") " pod="openshift-image-registry/image-registry-66df7c8f76-mkn9p" Nov 28 11:14:27 crc kubenswrapper[4923]: I1128 11:14:27.318043 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8ad66a3e-89dd-497f-903d-bc0e65e51d18-registry-certificates\") pod \"image-registry-66df7c8f76-mkn9p\" (UID: \"8ad66a3e-89dd-497f-903d-bc0e65e51d18\") " 
pod="openshift-image-registry/image-registry-66df7c8f76-mkn9p" Nov 28 11:14:27 crc kubenswrapper[4923]: I1128 11:14:27.319023 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8ad66a3e-89dd-497f-903d-bc0e65e51d18-registry-certificates\") pod \"image-registry-66df7c8f76-mkn9p\" (UID: \"8ad66a3e-89dd-497f-903d-bc0e65e51d18\") " pod="openshift-image-registry/image-registry-66df7c8f76-mkn9p" Nov 28 11:14:27 crc kubenswrapper[4923]: I1128 11:14:27.319274 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8ad66a3e-89dd-497f-903d-bc0e65e51d18-ca-trust-extracted\") pod \"image-registry-66df7c8f76-mkn9p\" (UID: \"8ad66a3e-89dd-497f-903d-bc0e65e51d18\") " pod="openshift-image-registry/image-registry-66df7c8f76-mkn9p" Nov 28 11:14:27 crc kubenswrapper[4923]: I1128 11:14:27.320177 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8ad66a3e-89dd-497f-903d-bc0e65e51d18-trusted-ca\") pod \"image-registry-66df7c8f76-mkn9p\" (UID: \"8ad66a3e-89dd-497f-903d-bc0e65e51d18\") " pod="openshift-image-registry/image-registry-66df7c8f76-mkn9p" Nov 28 11:14:27 crc kubenswrapper[4923]: I1128 11:14:27.333949 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8ad66a3e-89dd-497f-903d-bc0e65e51d18-bound-sa-token\") pod \"image-registry-66df7c8f76-mkn9p\" (UID: \"8ad66a3e-89dd-497f-903d-bc0e65e51d18\") " pod="openshift-image-registry/image-registry-66df7c8f76-mkn9p" Nov 28 11:14:27 crc kubenswrapper[4923]: I1128 11:14:27.337759 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8ad66a3e-89dd-497f-903d-bc0e65e51d18-registry-tls\") pod \"image-registry-66df7c8f76-mkn9p\" (UID: \"8ad66a3e-89dd-497f-903d-bc0e65e51d18\") " pod="openshift-image-registry/image-registry-66df7c8f76-mkn9p" Nov 28 11:14:27 crc kubenswrapper[4923]: I1128 11:14:27.338012 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5lzjz\" (UniqueName: \"kubernetes.io/projected/8ad66a3e-89dd-497f-903d-bc0e65e51d18-kube-api-access-5lzjz\") pod \"image-registry-66df7c8f76-mkn9p\" (UID: \"8ad66a3e-89dd-497f-903d-bc0e65e51d18\") " pod="openshift-image-registry/image-registry-66df7c8f76-mkn9p" Nov 28 11:14:27 crc kubenswrapper[4923]: I1128 11:14:27.338863 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8ad66a3e-89dd-497f-903d-bc0e65e51d18-installation-pull-secrets\") pod \"image-registry-66df7c8f76-mkn9p\" (UID: \"8ad66a3e-89dd-497f-903d-bc0e65e51d18\") " pod="openshift-image-registry/image-registry-66df7c8f76-mkn9p" Nov 28 11:14:27 crc kubenswrapper[4923]: I1128 11:14:27.455688 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-mkn9p" Nov 28 11:14:27 crc kubenswrapper[4923]: I1128 11:14:27.935592 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-mkn9p"] Nov 28 11:14:28 crc kubenswrapper[4923]: I1128 11:14:28.021448 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-mkn9p" event={"ID":"8ad66a3e-89dd-497f-903d-bc0e65e51d18","Type":"ContainerStarted","Data":"d1284e3bfafd665a7a4518e0ea44c318aae241cbc22080b2da95f90aa8fb8fc6"} Nov 28 11:14:29 crc kubenswrapper[4923]: I1128 11:14:29.028984 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-mkn9p" event={"ID":"8ad66a3e-89dd-497f-903d-bc0e65e51d18","Type":"ContainerStarted","Data":"d541ce0b5de2938f86e9184262c61a806392c31d58c5b281e441dbe26e2c1ea7"} Nov 28 11:14:29 crc kubenswrapper[4923]: I1128 11:14:29.029269 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-mkn9p" Nov 28 11:14:29 crc kubenswrapper[4923]: I1128 11:14:29.059783 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-mkn9p" podStartSLOduration=2.059758414 podStartE2EDuration="2.059758414s" podCreationTimestamp="2025-11-28 11:14:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:14:29.05819441 +0000 UTC m=+348.186878610" watchObservedRunningTime="2025-11-28 11:14:29.059758414 +0000 UTC m=+348.188442664" Nov 28 11:14:36 crc kubenswrapper[4923]: I1128 11:14:36.573071 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-js2pf"] Nov 28 11:14:36 crc kubenswrapper[4923]: I1128 11:14:36.574108 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-js2pf" podUID="f54a8053-81cc-429f-b68e-87a3fd245263" containerName="registry-server" containerID="cri-o://357c8fb2d02d63ba1f049ffe71339e08055730c4c91838dcf695fa385e8077bf" gracePeriod=2 Nov 28 11:14:37 crc kubenswrapper[4923]: I1128 11:14:37.084832 4923 generic.go:334] "Generic (PLEG): container finished" podID="f54a8053-81cc-429f-b68e-87a3fd245263" containerID="357c8fb2d02d63ba1f049ffe71339e08055730c4c91838dcf695fa385e8077bf" exitCode=0 Nov 28 11:14:37 crc kubenswrapper[4923]: I1128 11:14:37.084898 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-js2pf" event={"ID":"f54a8053-81cc-429f-b68e-87a3fd245263","Type":"ContainerDied","Data":"357c8fb2d02d63ba1f049ffe71339e08055730c4c91838dcf695fa385e8077bf"} Nov 28 11:14:37 crc kubenswrapper[4923]: I1128 11:14:37.085098 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-js2pf" event={"ID":"f54a8053-81cc-429f-b68e-87a3fd245263","Type":"ContainerDied","Data":"4df7fcd080d49d8fb2e935538ac1bc88e0529729bd7f1ac2e5c6998abd42cb56"} Nov 28 11:14:37 crc kubenswrapper[4923]: I1128 11:14:37.085116 4923 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4df7fcd080d49d8fb2e935538ac1bc88e0529729bd7f1ac2e5c6998abd42cb56" Nov 28 11:14:37 crc kubenswrapper[4923]: I1128 11:14:37.093718 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-js2pf" Nov 28 11:14:37 crc kubenswrapper[4923]: I1128 11:14:37.196331 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7f9kp\" (UniqueName: \"kubernetes.io/projected/f54a8053-81cc-429f-b68e-87a3fd245263-kube-api-access-7f9kp\") pod \"f54a8053-81cc-429f-b68e-87a3fd245263\" (UID: \"f54a8053-81cc-429f-b68e-87a3fd245263\") " Nov 28 11:14:37 crc kubenswrapper[4923]: I1128 11:14:37.196370 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f54a8053-81cc-429f-b68e-87a3fd245263-utilities\") pod \"f54a8053-81cc-429f-b68e-87a3fd245263\" (UID: \"f54a8053-81cc-429f-b68e-87a3fd245263\") " Nov 28 11:14:37 crc kubenswrapper[4923]: I1128 11:14:37.196461 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f54a8053-81cc-429f-b68e-87a3fd245263-catalog-content\") pod \"f54a8053-81cc-429f-b68e-87a3fd245263\" (UID: \"f54a8053-81cc-429f-b68e-87a3fd245263\") " Nov 28 11:14:37 crc kubenswrapper[4923]: I1128 11:14:37.197737 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f54a8053-81cc-429f-b68e-87a3fd245263-utilities" (OuterVolumeSpecName: "utilities") pod "f54a8053-81cc-429f-b68e-87a3fd245263" (UID: "f54a8053-81cc-429f-b68e-87a3fd245263"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:14:37 crc kubenswrapper[4923]: I1128 11:14:37.202097 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f54a8053-81cc-429f-b68e-87a3fd245263-kube-api-access-7f9kp" (OuterVolumeSpecName: "kube-api-access-7f9kp") pod "f54a8053-81cc-429f-b68e-87a3fd245263" (UID: "f54a8053-81cc-429f-b68e-87a3fd245263"). InnerVolumeSpecName "kube-api-access-7f9kp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:14:37 crc kubenswrapper[4923]: I1128 11:14:37.242321 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f54a8053-81cc-429f-b68e-87a3fd245263-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f54a8053-81cc-429f-b68e-87a3fd245263" (UID: "f54a8053-81cc-429f-b68e-87a3fd245263"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:14:37 crc kubenswrapper[4923]: I1128 11:14:37.297585 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7f9kp\" (UniqueName: \"kubernetes.io/projected/f54a8053-81cc-429f-b68e-87a3fd245263-kube-api-access-7f9kp\") on node \"crc\" DevicePath \"\"" Nov 28 11:14:37 crc kubenswrapper[4923]: I1128 11:14:37.297618 4923 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f54a8053-81cc-429f-b68e-87a3fd245263-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 11:14:37 crc kubenswrapper[4923]: I1128 11:14:37.297631 4923 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f54a8053-81cc-429f-b68e-87a3fd245263-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 11:14:38 crc kubenswrapper[4923]: I1128 11:14:38.092484 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-js2pf" Nov 28 11:14:38 crc kubenswrapper[4923]: I1128 11:14:38.142983 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-js2pf"] Nov 28 11:14:38 crc kubenswrapper[4923]: I1128 11:14:38.155280 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-js2pf"] Nov 28 11:14:39 crc kubenswrapper[4923]: I1128 11:14:39.176739 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f54a8053-81cc-429f-b68e-87a3fd245263" path="/var/lib/kubelet/pods/f54a8053-81cc-429f-b68e-87a3fd245263/volumes" Nov 28 11:14:42 crc kubenswrapper[4923]: I1128 11:14:42.685331 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-l4xf8"] Nov 28 11:14:42 crc kubenswrapper[4923]: I1128 11:14:42.686841 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-l4xf8" podUID="84ce6a6f-d3d1-4ef9-8ca5-79dfa714a2b4" containerName="registry-server" containerID="cri-o://79306c65c47cb373aa6b330add93214a34b14d596e50348dc5ba71d68c337ffa" gracePeriod=30 Nov 28 11:14:42 crc kubenswrapper[4923]: I1128 11:14:42.693905 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-46sx6"] Nov 28 11:14:42 crc kubenswrapper[4923]: I1128 11:14:42.694142 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-46sx6" podUID="cffefe1d-9522-408d-aadf-c688411908e1" containerName="registry-server" containerID="cri-o://f775975c413550383947df61c989c197028a0a3e4706576ef6832f6d44a60806" gracePeriod=30 Nov 28 11:14:42 crc kubenswrapper[4923]: I1128 11:14:42.703997 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-lswhk"] Nov 28 11:14:42 crc kubenswrapper[4923]: I1128 11:14:42.704192 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-lswhk" podUID="bc404fb9-c265-4265-84e8-e3dd111fae9a" containerName="marketplace-operator" containerID="cri-o://872d6898468b2c0649316ec85c5451bff29fe4aafd305b008db81c94839c2c4f" gracePeriod=30 Nov 28 11:14:42 crc kubenswrapper[4923]: I1128 11:14:42.720941 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-n2qp8"] Nov 28 11:14:42 crc kubenswrapper[4923]: I1128 11:14:42.721155 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-n2qp8" podUID="a6908a38-5adb-40c3-85e8-730eaa4937ef" containerName="registry-server" containerID="cri-o://f0425a114843f6e320533c00f2604a9fdeeca0c1005fb0a2f418a5ce1f933d2e" gracePeriod=30 Nov 28 11:14:42 crc kubenswrapper[4923]: I1128 11:14:42.728814 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-qkkh6"] Nov 28 11:14:42 crc kubenswrapper[4923]: I1128 11:14:42.729031 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-qkkh6" podUID="39eebecc-004d-445a-ac63-fad7bc311127" containerName="registry-server" containerID="cri-o://6cd659195ccf10b4581b41615cf0f9b3d06bad51789b24595e4edea2961793b8" gracePeriod=30 Nov 28 11:14:42 crc kubenswrapper[4923]: I1128 11:14:42.735187 4923 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-marketplace/marketplace-operator-79b997595-mswcg"] Nov 28 11:14:42 crc kubenswrapper[4923]: E1128 11:14:42.735398 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f54a8053-81cc-429f-b68e-87a3fd245263" containerName="registry-server" Nov 28 11:14:42 crc kubenswrapper[4923]: I1128 11:14:42.735414 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="f54a8053-81cc-429f-b68e-87a3fd245263" containerName="registry-server" Nov 28 11:14:42 crc kubenswrapper[4923]: E1128 11:14:42.735429 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f54a8053-81cc-429f-b68e-87a3fd245263" containerName="extract-utilities" Nov 28 11:14:42 crc kubenswrapper[4923]: I1128 11:14:42.735458 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="f54a8053-81cc-429f-b68e-87a3fd245263" containerName="extract-utilities" Nov 28 11:14:42 crc kubenswrapper[4923]: E1128 11:14:42.735467 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f54a8053-81cc-429f-b68e-87a3fd245263" containerName="extract-content" Nov 28 11:14:42 crc kubenswrapper[4923]: I1128 11:14:42.735473 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="f54a8053-81cc-429f-b68e-87a3fd245263" containerName="extract-content" Nov 28 11:14:42 crc kubenswrapper[4923]: I1128 11:14:42.735567 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="f54a8053-81cc-429f-b68e-87a3fd245263" containerName="registry-server" Nov 28 11:14:42 crc kubenswrapper[4923]: I1128 11:14:42.737069 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-mswcg" Nov 28 11:14:42 crc kubenswrapper[4923]: I1128 11:14:42.757008 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-mswcg"] Nov 28 11:14:42 crc kubenswrapper[4923]: I1128 11:14:42.870176 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fgrd5\" (UniqueName: \"kubernetes.io/projected/82dd6941-bad4-4c84-8589-efc71c77359b-kube-api-access-fgrd5\") pod \"marketplace-operator-79b997595-mswcg\" (UID: \"82dd6941-bad4-4c84-8589-efc71c77359b\") " pod="openshift-marketplace/marketplace-operator-79b997595-mswcg" Nov 28 11:14:42 crc kubenswrapper[4923]: I1128 11:14:42.870229 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/82dd6941-bad4-4c84-8589-efc71c77359b-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-mswcg\" (UID: \"82dd6941-bad4-4c84-8589-efc71c77359b\") " pod="openshift-marketplace/marketplace-operator-79b997595-mswcg" Nov 28 11:14:42 crc kubenswrapper[4923]: I1128 11:14:42.870289 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/82dd6941-bad4-4c84-8589-efc71c77359b-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-mswcg\" (UID: \"82dd6941-bad4-4c84-8589-efc71c77359b\") " pod="openshift-marketplace/marketplace-operator-79b997595-mswcg" Nov 28 11:14:42 crc kubenswrapper[4923]: I1128 11:14:42.971188 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/82dd6941-bad4-4c84-8589-efc71c77359b-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-mswcg\" (UID: 
\"82dd6941-bad4-4c84-8589-efc71c77359b\") " pod="openshift-marketplace/marketplace-operator-79b997595-mswcg" Nov 28 11:14:42 crc kubenswrapper[4923]: I1128 11:14:42.971244 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fgrd5\" (UniqueName: \"kubernetes.io/projected/82dd6941-bad4-4c84-8589-efc71c77359b-kube-api-access-fgrd5\") pod \"marketplace-operator-79b997595-mswcg\" (UID: \"82dd6941-bad4-4c84-8589-efc71c77359b\") " pod="openshift-marketplace/marketplace-operator-79b997595-mswcg" Nov 28 11:14:42 crc kubenswrapper[4923]: I1128 11:14:42.971267 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/82dd6941-bad4-4c84-8589-efc71c77359b-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-mswcg\" (UID: \"82dd6941-bad4-4c84-8589-efc71c77359b\") " pod="openshift-marketplace/marketplace-operator-79b997595-mswcg" Nov 28 11:14:42 crc kubenswrapper[4923]: I1128 11:14:42.972841 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/82dd6941-bad4-4c84-8589-efc71c77359b-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-mswcg\" (UID: \"82dd6941-bad4-4c84-8589-efc71c77359b\") " pod="openshift-marketplace/marketplace-operator-79b997595-mswcg" Nov 28 11:14:42 crc kubenswrapper[4923]: I1128 11:14:42.977761 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/82dd6941-bad4-4c84-8589-efc71c77359b-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-mswcg\" (UID: \"82dd6941-bad4-4c84-8589-efc71c77359b\") " pod="openshift-marketplace/marketplace-operator-79b997595-mswcg" Nov 28 11:14:42 crc kubenswrapper[4923]: I1128 11:14:42.988655 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fgrd5\" (UniqueName: \"kubernetes.io/projected/82dd6941-bad4-4c84-8589-efc71c77359b-kube-api-access-fgrd5\") pod \"marketplace-operator-79b997595-mswcg\" (UID: \"82dd6941-bad4-4c84-8589-efc71c77359b\") " pod="openshift-marketplace/marketplace-operator-79b997595-mswcg" Nov 28 11:14:43 crc kubenswrapper[4923]: I1128 11:14:43.052284 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-mswcg" Nov 28 11:14:43 crc kubenswrapper[4923]: I1128 11:14:43.149218 4923 generic.go:334] "Generic (PLEG): container finished" podID="cffefe1d-9522-408d-aadf-c688411908e1" containerID="f775975c413550383947df61c989c197028a0a3e4706576ef6832f6d44a60806" exitCode=0 Nov 28 11:14:43 crc kubenswrapper[4923]: I1128 11:14:43.149283 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-46sx6" event={"ID":"cffefe1d-9522-408d-aadf-c688411908e1","Type":"ContainerDied","Data":"f775975c413550383947df61c989c197028a0a3e4706576ef6832f6d44a60806"} Nov 28 11:14:43 crc kubenswrapper[4923]: I1128 11:14:43.156378 4923 generic.go:334] "Generic (PLEG): container finished" podID="bc404fb9-c265-4265-84e8-e3dd111fae9a" containerID="872d6898468b2c0649316ec85c5451bff29fe4aafd305b008db81c94839c2c4f" exitCode=0 Nov 28 11:14:43 crc kubenswrapper[4923]: I1128 11:14:43.156464 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-lswhk" event={"ID":"bc404fb9-c265-4265-84e8-e3dd111fae9a","Type":"ContainerDied","Data":"872d6898468b2c0649316ec85c5451bff29fe4aafd305b008db81c94839c2c4f"} Nov 28 11:14:43 crc kubenswrapper[4923]: I1128 11:14:43.156516 4923 scope.go:117] "RemoveContainer" containerID="00403708fb60cdd5ce416fdf4b557eed99855044ed47656236d9eec393278396" Nov 28 11:14:43 crc kubenswrapper[4923]: I1128 11:14:43.163619 4923 generic.go:334] "Generic (PLEG): container finished" podID="84ce6a6f-d3d1-4ef9-8ca5-79dfa714a2b4" containerID="79306c65c47cb373aa6b330add93214a34b14d596e50348dc5ba71d68c337ffa" exitCode=0 Nov 28 11:14:43 crc kubenswrapper[4923]: I1128 11:14:43.163684 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l4xf8" event={"ID":"84ce6a6f-d3d1-4ef9-8ca5-79dfa714a2b4","Type":"ContainerDied","Data":"79306c65c47cb373aa6b330add93214a34b14d596e50348dc5ba71d68c337ffa"} Nov 28 11:14:43 crc kubenswrapper[4923]: I1128 11:14:43.169595 4923 generic.go:334] "Generic (PLEG): container finished" podID="a6908a38-5adb-40c3-85e8-730eaa4937ef" containerID="f0425a114843f6e320533c00f2604a9fdeeca0c1005fb0a2f418a5ce1f933d2e" exitCode=0 Nov 28 11:14:43 crc kubenswrapper[4923]: I1128 11:14:43.175318 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-n2qp8" event={"ID":"a6908a38-5adb-40c3-85e8-730eaa4937ef","Type":"ContainerDied","Data":"f0425a114843f6e320533c00f2604a9fdeeca0c1005fb0a2f418a5ce1f933d2e"} Nov 28 11:14:43 crc kubenswrapper[4923]: I1128 11:14:43.233719 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-l4xf8" Nov 28 11:14:43 crc kubenswrapper[4923]: I1128 11:14:43.377912 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k2bjs\" (UniqueName: \"kubernetes.io/projected/84ce6a6f-d3d1-4ef9-8ca5-79dfa714a2b4-kube-api-access-k2bjs\") pod \"84ce6a6f-d3d1-4ef9-8ca5-79dfa714a2b4\" (UID: \"84ce6a6f-d3d1-4ef9-8ca5-79dfa714a2b4\") " Nov 28 11:14:43 crc kubenswrapper[4923]: I1128 11:14:43.377987 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/84ce6a6f-d3d1-4ef9-8ca5-79dfa714a2b4-catalog-content\") pod \"84ce6a6f-d3d1-4ef9-8ca5-79dfa714a2b4\" (UID: \"84ce6a6f-d3d1-4ef9-8ca5-79dfa714a2b4\") " Nov 28 11:14:43 crc kubenswrapper[4923]: I1128 11:14:43.378021 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/84ce6a6f-d3d1-4ef9-8ca5-79dfa714a2b4-utilities\") pod \"84ce6a6f-d3d1-4ef9-8ca5-79dfa714a2b4\" (UID: \"84ce6a6f-d3d1-4ef9-8ca5-79dfa714a2b4\") " Nov 28 11:14:43 crc kubenswrapper[4923]: I1128 11:14:43.379042 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/84ce6a6f-d3d1-4ef9-8ca5-79dfa714a2b4-utilities" (OuterVolumeSpecName: "utilities") pod "84ce6a6f-d3d1-4ef9-8ca5-79dfa714a2b4" (UID: "84ce6a6f-d3d1-4ef9-8ca5-79dfa714a2b4"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:14:43 crc kubenswrapper[4923]: I1128 11:14:43.388037 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/84ce6a6f-d3d1-4ef9-8ca5-79dfa714a2b4-kube-api-access-k2bjs" (OuterVolumeSpecName: "kube-api-access-k2bjs") pod "84ce6a6f-d3d1-4ef9-8ca5-79dfa714a2b4" (UID: "84ce6a6f-d3d1-4ef9-8ca5-79dfa714a2b4"). InnerVolumeSpecName "kube-api-access-k2bjs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:14:43 crc kubenswrapper[4923]: I1128 11:14:43.415940 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-n2qp8" Nov 28 11:14:43 crc kubenswrapper[4923]: I1128 11:14:43.446425 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/84ce6a6f-d3d1-4ef9-8ca5-79dfa714a2b4-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "84ce6a6f-d3d1-4ef9-8ca5-79dfa714a2b4" (UID: "84ce6a6f-d3d1-4ef9-8ca5-79dfa714a2b4"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:14:43 crc kubenswrapper[4923]: I1128 11:14:43.461130 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-46sx6" Nov 28 11:14:43 crc kubenswrapper[4923]: I1128 11:14:43.465323 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-lswhk" Nov 28 11:14:43 crc kubenswrapper[4923]: I1128 11:14:43.479705 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a6908a38-5adb-40c3-85e8-730eaa4937ef-utilities\") pod \"a6908a38-5adb-40c3-85e8-730eaa4937ef\" (UID: \"a6908a38-5adb-40c3-85e8-730eaa4937ef\") " Nov 28 11:14:43 crc kubenswrapper[4923]: I1128 11:14:43.479953 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a6908a38-5adb-40c3-85e8-730eaa4937ef-catalog-content\") pod \"a6908a38-5adb-40c3-85e8-730eaa4937ef\" (UID: \"a6908a38-5adb-40c3-85e8-730eaa4937ef\") " Nov 28 11:14:43 crc kubenswrapper[4923]: I1128 11:14:43.480068 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9dd64\" (UniqueName: \"kubernetes.io/projected/a6908a38-5adb-40c3-85e8-730eaa4937ef-kube-api-access-9dd64\") pod \"a6908a38-5adb-40c3-85e8-730eaa4937ef\" (UID: \"a6908a38-5adb-40c3-85e8-730eaa4937ef\") " Nov 28 11:14:43 crc kubenswrapper[4923]: I1128 11:14:43.480356 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k2bjs\" (UniqueName: \"kubernetes.io/projected/84ce6a6f-d3d1-4ef9-8ca5-79dfa714a2b4-kube-api-access-k2bjs\") on node \"crc\" DevicePath \"\"" Nov 28 11:14:43 crc kubenswrapper[4923]: I1128 11:14:43.480479 4923 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/84ce6a6f-d3d1-4ef9-8ca5-79dfa714a2b4-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 11:14:43 crc kubenswrapper[4923]: I1128 11:14:43.480560 4923 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/84ce6a6f-d3d1-4ef9-8ca5-79dfa714a2b4-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 11:14:43 crc kubenswrapper[4923]: I1128 11:14:43.481536 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a6908a38-5adb-40c3-85e8-730eaa4937ef-utilities" (OuterVolumeSpecName: "utilities") pod "a6908a38-5adb-40c3-85e8-730eaa4937ef" (UID: "a6908a38-5adb-40c3-85e8-730eaa4937ef"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:14:43 crc kubenswrapper[4923]: I1128 11:14:43.488375 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a6908a38-5adb-40c3-85e8-730eaa4937ef-kube-api-access-9dd64" (OuterVolumeSpecName: "kube-api-access-9dd64") pod "a6908a38-5adb-40c3-85e8-730eaa4937ef" (UID: "a6908a38-5adb-40c3-85e8-730eaa4937ef"). InnerVolumeSpecName "kube-api-access-9dd64". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:14:43 crc kubenswrapper[4923]: I1128 11:14:43.500717 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a6908a38-5adb-40c3-85e8-730eaa4937ef-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a6908a38-5adb-40c3-85e8-730eaa4937ef" (UID: "a6908a38-5adb-40c3-85e8-730eaa4937ef"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:14:43 crc kubenswrapper[4923]: I1128 11:14:43.581304 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bc404fb9-c265-4265-84e8-e3dd111fae9a-marketplace-trusted-ca\") pod \"bc404fb9-c265-4265-84e8-e3dd111fae9a\" (UID: \"bc404fb9-c265-4265-84e8-e3dd111fae9a\") " Nov 28 11:14:43 crc kubenswrapper[4923]: I1128 11:14:43.581368 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cffefe1d-9522-408d-aadf-c688411908e1-utilities\") pod \"cffefe1d-9522-408d-aadf-c688411908e1\" (UID: \"cffefe1d-9522-408d-aadf-c688411908e1\") " Nov 28 11:14:43 crc kubenswrapper[4923]: I1128 11:14:43.581409 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zlhhb\" (UniqueName: \"kubernetes.io/projected/cffefe1d-9522-408d-aadf-c688411908e1-kube-api-access-zlhhb\") pod \"cffefe1d-9522-408d-aadf-c688411908e1\" (UID: \"cffefe1d-9522-408d-aadf-c688411908e1\") " Nov 28 11:14:43 crc kubenswrapper[4923]: I1128 11:14:43.581432 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ll6pl\" (UniqueName: \"kubernetes.io/projected/bc404fb9-c265-4265-84e8-e3dd111fae9a-kube-api-access-ll6pl\") pod \"bc404fb9-c265-4265-84e8-e3dd111fae9a\" (UID: \"bc404fb9-c265-4265-84e8-e3dd111fae9a\") " Nov 28 11:14:43 crc kubenswrapper[4923]: I1128 11:14:43.581482 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cffefe1d-9522-408d-aadf-c688411908e1-catalog-content\") pod \"cffefe1d-9522-408d-aadf-c688411908e1\" (UID: \"cffefe1d-9522-408d-aadf-c688411908e1\") " Nov 28 11:14:43 crc kubenswrapper[4923]: I1128 11:14:43.581509 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/bc404fb9-c265-4265-84e8-e3dd111fae9a-marketplace-operator-metrics\") pod \"bc404fb9-c265-4265-84e8-e3dd111fae9a\" (UID: \"bc404fb9-c265-4265-84e8-e3dd111fae9a\") " Nov 28 11:14:43 crc kubenswrapper[4923]: I1128 11:14:43.582351 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bc404fb9-c265-4265-84e8-e3dd111fae9a-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "bc404fb9-c265-4265-84e8-e3dd111fae9a" (UID: "bc404fb9-c265-4265-84e8-e3dd111fae9a"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:14:43 crc kubenswrapper[4923]: I1128 11:14:43.582573 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cffefe1d-9522-408d-aadf-c688411908e1-utilities" (OuterVolumeSpecName: "utilities") pod "cffefe1d-9522-408d-aadf-c688411908e1" (UID: "cffefe1d-9522-408d-aadf-c688411908e1"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:14:43 crc kubenswrapper[4923]: I1128 11:14:43.582588 4923 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a6908a38-5adb-40c3-85e8-730eaa4937ef-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 11:14:43 crc kubenswrapper[4923]: I1128 11:14:43.582614 4923 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a6908a38-5adb-40c3-85e8-730eaa4937ef-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 11:14:43 crc kubenswrapper[4923]: I1128 11:14:43.582629 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9dd64\" (UniqueName: \"kubernetes.io/projected/a6908a38-5adb-40c3-85e8-730eaa4937ef-kube-api-access-9dd64\") on node \"crc\" DevicePath \"\"" Nov 28 11:14:43 crc kubenswrapper[4923]: I1128 11:14:43.582642 4923 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bc404fb9-c265-4265-84e8-e3dd111fae9a-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 28 11:14:43 crc kubenswrapper[4923]: I1128 11:14:43.584565 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cffefe1d-9522-408d-aadf-c688411908e1-kube-api-access-zlhhb" (OuterVolumeSpecName: "kube-api-access-zlhhb") pod "cffefe1d-9522-408d-aadf-c688411908e1" (UID: "cffefe1d-9522-408d-aadf-c688411908e1"). InnerVolumeSpecName "kube-api-access-zlhhb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:14:43 crc kubenswrapper[4923]: I1128 11:14:43.584683 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc404fb9-c265-4265-84e8-e3dd111fae9a-kube-api-access-ll6pl" (OuterVolumeSpecName: "kube-api-access-ll6pl") pod "bc404fb9-c265-4265-84e8-e3dd111fae9a" (UID: "bc404fb9-c265-4265-84e8-e3dd111fae9a"). InnerVolumeSpecName "kube-api-access-ll6pl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:14:43 crc kubenswrapper[4923]: I1128 11:14:43.585116 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc404fb9-c265-4265-84e8-e3dd111fae9a-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "bc404fb9-c265-4265-84e8-e3dd111fae9a" (UID: "bc404fb9-c265-4265-84e8-e3dd111fae9a"). InnerVolumeSpecName "marketplace-operator-metrics". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:14:43 crc kubenswrapper[4923]: W1128 11:14:43.590956 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod82dd6941_bad4_4c84_8589_efc71c77359b.slice/crio-1176d757240f5609c0cfc0577846e2519df47471eca95582fc0a23e0277895eb WatchSource:0}: Error finding container 1176d757240f5609c0cfc0577846e2519df47471eca95582fc0a23e0277895eb: Status 404 returned error can't find the container with id 1176d757240f5609c0cfc0577846e2519df47471eca95582fc0a23e0277895eb Nov 28 11:14:43 crc kubenswrapper[4923]: I1128 11:14:43.593272 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-mswcg"] Nov 28 11:14:43 crc kubenswrapper[4923]: I1128 11:14:43.634534 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cffefe1d-9522-408d-aadf-c688411908e1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "cffefe1d-9522-408d-aadf-c688411908e1" (UID: "cffefe1d-9522-408d-aadf-c688411908e1"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:14:43 crc kubenswrapper[4923]: I1128 11:14:43.684084 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zlhhb\" (UniqueName: \"kubernetes.io/projected/cffefe1d-9522-408d-aadf-c688411908e1-kube-api-access-zlhhb\") on node \"crc\" DevicePath \"\"" Nov 28 11:14:43 crc kubenswrapper[4923]: I1128 11:14:43.684106 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ll6pl\" (UniqueName: \"kubernetes.io/projected/bc404fb9-c265-4265-84e8-e3dd111fae9a-kube-api-access-ll6pl\") on node \"crc\" DevicePath \"\"" Nov 28 11:14:43 crc kubenswrapper[4923]: I1128 11:14:43.684117 4923 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cffefe1d-9522-408d-aadf-c688411908e1-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 11:14:43 crc kubenswrapper[4923]: I1128 11:14:43.684125 4923 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/bc404fb9-c265-4265-84e8-e3dd111fae9a-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Nov 28 11:14:43 crc kubenswrapper[4923]: I1128 11:14:43.684135 4923 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cffefe1d-9522-408d-aadf-c688411908e1-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 11:14:43 crc kubenswrapper[4923]: I1128 11:14:43.952881 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-qkkh6" Nov 28 11:14:44 crc kubenswrapper[4923]: I1128 11:14:44.025816 4923 patch_prober.go:28] interesting pod/machine-config-daemon-bwdth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 11:14:44 crc kubenswrapper[4923]: I1128 11:14:44.025866 4923 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 11:14:44 crc kubenswrapper[4923]: I1128 11:14:44.089740 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/39eebecc-004d-445a-ac63-fad7bc311127-catalog-content\") pod \"39eebecc-004d-445a-ac63-fad7bc311127\" (UID: \"39eebecc-004d-445a-ac63-fad7bc311127\") " Nov 28 11:14:44 crc kubenswrapper[4923]: I1128 11:14:44.089853 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/39eebecc-004d-445a-ac63-fad7bc311127-utilities\") pod \"39eebecc-004d-445a-ac63-fad7bc311127\" (UID: \"39eebecc-004d-445a-ac63-fad7bc311127\") " Nov 28 11:14:44 crc kubenswrapper[4923]: I1128 11:14:44.089908 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wsdls\" (UniqueName: \"kubernetes.io/projected/39eebecc-004d-445a-ac63-fad7bc311127-kube-api-access-wsdls\") pod \"39eebecc-004d-445a-ac63-fad7bc311127\" (UID: \"39eebecc-004d-445a-ac63-fad7bc311127\") " Nov 28 11:14:44 crc kubenswrapper[4923]: I1128 11:14:44.090787 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/39eebecc-004d-445a-ac63-fad7bc311127-utilities" (OuterVolumeSpecName: "utilities") pod "39eebecc-004d-445a-ac63-fad7bc311127" (UID: "39eebecc-004d-445a-ac63-fad7bc311127"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:14:44 crc kubenswrapper[4923]: I1128 11:14:44.092838 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/39eebecc-004d-445a-ac63-fad7bc311127-kube-api-access-wsdls" (OuterVolumeSpecName: "kube-api-access-wsdls") pod "39eebecc-004d-445a-ac63-fad7bc311127" (UID: "39eebecc-004d-445a-ac63-fad7bc311127"). InnerVolumeSpecName "kube-api-access-wsdls". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:14:44 crc kubenswrapper[4923]: I1128 11:14:44.176008 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-mswcg" event={"ID":"82dd6941-bad4-4c84-8589-efc71c77359b","Type":"ContainerStarted","Data":"73daab9efc749f287e7db217bb336193d1df3f40c8f604b865d00930fa0ce836"} Nov 28 11:14:44 crc kubenswrapper[4923]: I1128 11:14:44.176049 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-mswcg" event={"ID":"82dd6941-bad4-4c84-8589-efc71c77359b","Type":"ContainerStarted","Data":"1176d757240f5609c0cfc0577846e2519df47471eca95582fc0a23e0277895eb"} Nov 28 11:14:44 crc kubenswrapper[4923]: I1128 11:14:44.179073 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-mswcg" Nov 28 11:14:44 crc kubenswrapper[4923]: I1128 11:14:44.184102 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-mswcg" Nov 28 11:14:44 crc kubenswrapper[4923]: I1128 11:14:44.187125 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-n2qp8" Nov 28 11:14:44 crc kubenswrapper[4923]: I1128 11:14:44.187238 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-n2qp8" event={"ID":"a6908a38-5adb-40c3-85e8-730eaa4937ef","Type":"ContainerDied","Data":"a9e7160ad2781e280da5d5ddc8aa57b3d0b20e676e3672c2fe549f67baf550cc"} Nov 28 11:14:44 crc kubenswrapper[4923]: I1128 11:14:44.187306 4923 scope.go:117] "RemoveContainer" containerID="f0425a114843f6e320533c00f2604a9fdeeca0c1005fb0a2f418a5ce1f933d2e" Nov 28 11:14:44 crc kubenswrapper[4923]: I1128 11:14:44.190176 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-46sx6" event={"ID":"cffefe1d-9522-408d-aadf-c688411908e1","Type":"ContainerDied","Data":"0d68c9ad7077585d2ee26c0cf957b1273d3edb225d251056da7de07ec386b045"} Nov 28 11:14:44 crc kubenswrapper[4923]: I1128 11:14:44.190256 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-46sx6" Nov 28 11:14:44 crc kubenswrapper[4923]: I1128 11:14:44.193466 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-lswhk" event={"ID":"bc404fb9-c265-4265-84e8-e3dd111fae9a","Type":"ContainerDied","Data":"dbf363100d2155ec3c3a027a44a9dfc11953264f80e04b558e022f970c64a472"} Nov 28 11:14:44 crc kubenswrapper[4923]: I1128 11:14:44.193690 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-lswhk" Nov 28 11:14:44 crc kubenswrapper[4923]: I1128 11:14:44.215110 4923 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/39eebecc-004d-445a-ac63-fad7bc311127-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 11:14:44 crc kubenswrapper[4923]: I1128 11:14:44.215144 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wsdls\" (UniqueName: \"kubernetes.io/projected/39eebecc-004d-445a-ac63-fad7bc311127-kube-api-access-wsdls\") on node \"crc\" DevicePath \"\"" Nov 28 11:14:44 crc kubenswrapper[4923]: I1128 11:14:44.218856 4923 generic.go:334] "Generic (PLEG): container finished" podID="39eebecc-004d-445a-ac63-fad7bc311127" containerID="6cd659195ccf10b4581b41615cf0f9b3d06bad51789b24595e4edea2961793b8" exitCode=0 Nov 28 11:14:44 crc kubenswrapper[4923]: I1128 11:14:44.219263 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qkkh6" event={"ID":"39eebecc-004d-445a-ac63-fad7bc311127","Type":"ContainerDied","Data":"6cd659195ccf10b4581b41615cf0f9b3d06bad51789b24595e4edea2961793b8"} Nov 28 11:14:44 crc kubenswrapper[4923]: I1128 11:14:44.219369 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qkkh6" event={"ID":"39eebecc-004d-445a-ac63-fad7bc311127","Type":"ContainerDied","Data":"1784becd123262fb5a6b0fcc553667cf7c9d05ba4a1119ac1187fcb92e706d0d"} Nov 28 11:14:44 crc kubenswrapper[4923]: I1128 11:14:44.219522 4923 scope.go:117] "RemoveContainer" containerID="2bd37d0797bd440a3d8260443928e2164e028b9a7a7b71341a218589a90d76c8" Nov 28 11:14:44 crc kubenswrapper[4923]: I1128 11:14:44.220282 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-qkkh6" Nov 28 11:14:44 crc kubenswrapper[4923]: I1128 11:14:44.228298 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/39eebecc-004d-445a-ac63-fad7bc311127-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "39eebecc-004d-445a-ac63-fad7bc311127" (UID: "39eebecc-004d-445a-ac63-fad7bc311127"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:14:44 crc kubenswrapper[4923]: I1128 11:14:44.232996 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-mswcg" podStartSLOduration=2.2328498 podStartE2EDuration="2.2328498s" podCreationTimestamp="2025-11-28 11:14:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:14:44.221783395 +0000 UTC m=+363.350467605" watchObservedRunningTime="2025-11-28 11:14:44.2328498 +0000 UTC m=+363.361534020" Nov 28 11:14:44 crc kubenswrapper[4923]: I1128 11:14:44.233721 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-l4xf8" Nov 28 11:14:44 crc kubenswrapper[4923]: I1128 11:14:44.233700 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l4xf8" event={"ID":"84ce6a6f-d3d1-4ef9-8ca5-79dfa714a2b4","Type":"ContainerDied","Data":"dad33e415182a38bca3d54483b7850c4ce969dbd825a76d3fa0a767ce24adfd8"} Nov 28 11:14:44 crc kubenswrapper[4923]: I1128 11:14:44.251108 4923 scope.go:117] "RemoveContainer" containerID="999e1c5fc7205e272920516843b7a49f17c34dee055b617341851205f64f23d8" Nov 28 11:14:44 crc kubenswrapper[4923]: I1128 11:14:44.284122 4923 scope.go:117] "RemoveContainer" containerID="f775975c413550383947df61c989c197028a0a3e4706576ef6832f6d44a60806" Nov 28 11:14:44 crc kubenswrapper[4923]: I1128 11:14:44.300855 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-l4xf8"] Nov 28 11:14:44 crc kubenswrapper[4923]: I1128 11:14:44.304369 4923 scope.go:117] "RemoveContainer" containerID="9d3de31b4f8f9ffdad295a46f0feab01f0616585979132ebf82818b8b88cb244" Nov 28 11:14:44 crc kubenswrapper[4923]: I1128 11:14:44.309210 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-l4xf8"] Nov 28 11:14:44 crc kubenswrapper[4923]: I1128 11:14:44.316336 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-lswhk"] Nov 28 11:14:44 crc kubenswrapper[4923]: I1128 11:14:44.316892 4923 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/39eebecc-004d-445a-ac63-fad7bc311127-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 11:14:44 crc kubenswrapper[4923]: I1128 11:14:44.323266 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-lswhk"] Nov 28 11:14:44 crc kubenswrapper[4923]: I1128 11:14:44.327106 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-46sx6"] Nov 28 11:14:44 crc kubenswrapper[4923]: I1128 11:14:44.328274 4923 scope.go:117] "RemoveContainer" containerID="5c3d75468936d3cfc4d995838da04a93d38b91f0ce843c1e6e43509ddd14bae3" Nov 28 11:14:44 crc kubenswrapper[4923]: I1128 11:14:44.331349 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-46sx6"] Nov 28 11:14:44 crc kubenswrapper[4923]: I1128 11:14:44.342862 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-n2qp8"] Nov 28 11:14:44 crc kubenswrapper[4923]: I1128 11:14:44.343168 4923 scope.go:117] "RemoveContainer" containerID="872d6898468b2c0649316ec85c5451bff29fe4aafd305b008db81c94839c2c4f" Nov 28 11:14:44 crc kubenswrapper[4923]: I1128 11:14:44.345584 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-n2qp8"] Nov 28 11:14:44 crc kubenswrapper[4923]: I1128 11:14:44.355068 4923 scope.go:117] "RemoveContainer" containerID="6cd659195ccf10b4581b41615cf0f9b3d06bad51789b24595e4edea2961793b8" Nov 28 11:14:44 crc kubenswrapper[4923]: I1128 11:14:44.365559 4923 scope.go:117] "RemoveContainer" containerID="97dd0d28a0b522fde607b02a45bd9047813d56366c10ee03121fcce1ad26db65" Nov 28 11:14:44 crc kubenswrapper[4923]: I1128 11:14:44.378507 4923 scope.go:117] "RemoveContainer" containerID="7f549cf77a8107e54eaa0fb4a5051ee9e7132f25e62fe1ffc8fc94f409ea6d07" Nov 28 11:14:44 crc kubenswrapper[4923]: I1128 
11:14:44.390001 4923 scope.go:117] "RemoveContainer" containerID="6cd659195ccf10b4581b41615cf0f9b3d06bad51789b24595e4edea2961793b8" Nov 28 11:14:44 crc kubenswrapper[4923]: E1128 11:14:44.390398 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6cd659195ccf10b4581b41615cf0f9b3d06bad51789b24595e4edea2961793b8\": container with ID starting with 6cd659195ccf10b4581b41615cf0f9b3d06bad51789b24595e4edea2961793b8 not found: ID does not exist" containerID="6cd659195ccf10b4581b41615cf0f9b3d06bad51789b24595e4edea2961793b8" Nov 28 11:14:44 crc kubenswrapper[4923]: I1128 11:14:44.390434 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6cd659195ccf10b4581b41615cf0f9b3d06bad51789b24595e4edea2961793b8"} err="failed to get container status \"6cd659195ccf10b4581b41615cf0f9b3d06bad51789b24595e4edea2961793b8\": rpc error: code = NotFound desc = could not find container \"6cd659195ccf10b4581b41615cf0f9b3d06bad51789b24595e4edea2961793b8\": container with ID starting with 6cd659195ccf10b4581b41615cf0f9b3d06bad51789b24595e4edea2961793b8 not found: ID does not exist" Nov 28 11:14:44 crc kubenswrapper[4923]: I1128 11:14:44.390458 4923 scope.go:117] "RemoveContainer" containerID="97dd0d28a0b522fde607b02a45bd9047813d56366c10ee03121fcce1ad26db65" Nov 28 11:14:44 crc kubenswrapper[4923]: E1128 11:14:44.390772 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"97dd0d28a0b522fde607b02a45bd9047813d56366c10ee03121fcce1ad26db65\": container with ID starting with 97dd0d28a0b522fde607b02a45bd9047813d56366c10ee03121fcce1ad26db65 not found: ID does not exist" containerID="97dd0d28a0b522fde607b02a45bd9047813d56366c10ee03121fcce1ad26db65" Nov 28 11:14:44 crc kubenswrapper[4923]: I1128 11:14:44.390957 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"97dd0d28a0b522fde607b02a45bd9047813d56366c10ee03121fcce1ad26db65"} err="failed to get container status \"97dd0d28a0b522fde607b02a45bd9047813d56366c10ee03121fcce1ad26db65\": rpc error: code = NotFound desc = could not find container \"97dd0d28a0b522fde607b02a45bd9047813d56366c10ee03121fcce1ad26db65\": container with ID starting with 97dd0d28a0b522fde607b02a45bd9047813d56366c10ee03121fcce1ad26db65 not found: ID does not exist" Nov 28 11:14:44 crc kubenswrapper[4923]: I1128 11:14:44.391105 4923 scope.go:117] "RemoveContainer" containerID="7f549cf77a8107e54eaa0fb4a5051ee9e7132f25e62fe1ffc8fc94f409ea6d07" Nov 28 11:14:44 crc kubenswrapper[4923]: E1128 11:14:44.391528 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7f549cf77a8107e54eaa0fb4a5051ee9e7132f25e62fe1ffc8fc94f409ea6d07\": container with ID starting with 7f549cf77a8107e54eaa0fb4a5051ee9e7132f25e62fe1ffc8fc94f409ea6d07 not found: ID does not exist" containerID="7f549cf77a8107e54eaa0fb4a5051ee9e7132f25e62fe1ffc8fc94f409ea6d07" Nov 28 11:14:44 crc kubenswrapper[4923]: I1128 11:14:44.391558 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7f549cf77a8107e54eaa0fb4a5051ee9e7132f25e62fe1ffc8fc94f409ea6d07"} err="failed to get container status \"7f549cf77a8107e54eaa0fb4a5051ee9e7132f25e62fe1ffc8fc94f409ea6d07\": rpc error: code = NotFound desc = could not find container \"7f549cf77a8107e54eaa0fb4a5051ee9e7132f25e62fe1ffc8fc94f409ea6d07\": container with ID 
starting with 7f549cf77a8107e54eaa0fb4a5051ee9e7132f25e62fe1ffc8fc94f409ea6d07 not found: ID does not exist" Nov 28 11:14:44 crc kubenswrapper[4923]: I1128 11:14:44.391580 4923 scope.go:117] "RemoveContainer" containerID="79306c65c47cb373aa6b330add93214a34b14d596e50348dc5ba71d68c337ffa" Nov 28 11:14:44 crc kubenswrapper[4923]: I1128 11:14:44.404076 4923 scope.go:117] "RemoveContainer" containerID="658ebd66c7b75847ddbaec1b023e59d3a812ea63a52fb0fecad9961e55c233b6" Nov 28 11:14:44 crc kubenswrapper[4923]: I1128 11:14:44.423303 4923 scope.go:117] "RemoveContainer" containerID="5f84e44d04ae5872ea05bdcaa7e7d623d3e51987cb7a2b80955128fb4344cb39" Nov 28 11:14:44 crc kubenswrapper[4923]: I1128 11:14:44.553033 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-qkkh6"] Nov 28 11:14:44 crc kubenswrapper[4923]: I1128 11:14:44.555559 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-qkkh6"] Nov 28 11:14:45 crc kubenswrapper[4923]: I1128 11:14:45.178500 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="39eebecc-004d-445a-ac63-fad7bc311127" path="/var/lib/kubelet/pods/39eebecc-004d-445a-ac63-fad7bc311127/volumes" Nov 28 11:14:45 crc kubenswrapper[4923]: I1128 11:14:45.180196 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="84ce6a6f-d3d1-4ef9-8ca5-79dfa714a2b4" path="/var/lib/kubelet/pods/84ce6a6f-d3d1-4ef9-8ca5-79dfa714a2b4/volumes" Nov 28 11:14:45 crc kubenswrapper[4923]: I1128 11:14:45.181000 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a6908a38-5adb-40c3-85e8-730eaa4937ef" path="/var/lib/kubelet/pods/a6908a38-5adb-40c3-85e8-730eaa4937ef/volumes" Nov 28 11:14:45 crc kubenswrapper[4923]: I1128 11:14:45.181776 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc404fb9-c265-4265-84e8-e3dd111fae9a" path="/var/lib/kubelet/pods/bc404fb9-c265-4265-84e8-e3dd111fae9a/volumes" Nov 28 11:14:45 crc kubenswrapper[4923]: I1128 11:14:45.183310 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cffefe1d-9522-408d-aadf-c688411908e1" path="/var/lib/kubelet/pods/cffefe1d-9522-408d-aadf-c688411908e1/volumes" Nov 28 11:14:45 crc kubenswrapper[4923]: I1128 11:14:45.376190 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-rwgwx"] Nov 28 11:14:45 crc kubenswrapper[4923]: E1128 11:14:45.376357 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="39eebecc-004d-445a-ac63-fad7bc311127" containerName="extract-utilities" Nov 28 11:14:45 crc kubenswrapper[4923]: I1128 11:14:45.376368 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="39eebecc-004d-445a-ac63-fad7bc311127" containerName="extract-utilities" Nov 28 11:14:45 crc kubenswrapper[4923]: E1128 11:14:45.376380 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cffefe1d-9522-408d-aadf-c688411908e1" containerName="extract-content" Nov 28 11:14:45 crc kubenswrapper[4923]: I1128 11:14:45.376386 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="cffefe1d-9522-408d-aadf-c688411908e1" containerName="extract-content" Nov 28 11:14:45 crc kubenswrapper[4923]: E1128 11:14:45.376398 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cffefe1d-9522-408d-aadf-c688411908e1" containerName="registry-server" Nov 28 11:14:45 crc kubenswrapper[4923]: I1128 11:14:45.376405 4923 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="cffefe1d-9522-408d-aadf-c688411908e1" containerName="registry-server" Nov 28 11:14:45 crc kubenswrapper[4923]: E1128 11:14:45.376415 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bc404fb9-c265-4265-84e8-e3dd111fae9a" containerName="marketplace-operator" Nov 28 11:14:45 crc kubenswrapper[4923]: I1128 11:14:45.376421 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="bc404fb9-c265-4265-84e8-e3dd111fae9a" containerName="marketplace-operator" Nov 28 11:14:45 crc kubenswrapper[4923]: E1128 11:14:45.376428 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="84ce6a6f-d3d1-4ef9-8ca5-79dfa714a2b4" containerName="extract-content" Nov 28 11:14:45 crc kubenswrapper[4923]: I1128 11:14:45.376433 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="84ce6a6f-d3d1-4ef9-8ca5-79dfa714a2b4" containerName="extract-content" Nov 28 11:14:45 crc kubenswrapper[4923]: E1128 11:14:45.376440 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a6908a38-5adb-40c3-85e8-730eaa4937ef" containerName="registry-server" Nov 28 11:14:45 crc kubenswrapper[4923]: I1128 11:14:45.376446 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="a6908a38-5adb-40c3-85e8-730eaa4937ef" containerName="registry-server" Nov 28 11:14:45 crc kubenswrapper[4923]: E1128 11:14:45.376454 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cffefe1d-9522-408d-aadf-c688411908e1" containerName="extract-utilities" Nov 28 11:14:45 crc kubenswrapper[4923]: I1128 11:14:45.376460 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="cffefe1d-9522-408d-aadf-c688411908e1" containerName="extract-utilities" Nov 28 11:14:45 crc kubenswrapper[4923]: E1128 11:14:45.376467 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="84ce6a6f-d3d1-4ef9-8ca5-79dfa714a2b4" containerName="registry-server" Nov 28 11:14:45 crc kubenswrapper[4923]: I1128 11:14:45.376473 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="84ce6a6f-d3d1-4ef9-8ca5-79dfa714a2b4" containerName="registry-server" Nov 28 11:14:45 crc kubenswrapper[4923]: E1128 11:14:45.376479 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a6908a38-5adb-40c3-85e8-730eaa4937ef" containerName="extract-content" Nov 28 11:14:45 crc kubenswrapper[4923]: I1128 11:14:45.376485 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="a6908a38-5adb-40c3-85e8-730eaa4937ef" containerName="extract-content" Nov 28 11:14:45 crc kubenswrapper[4923]: E1128 11:14:45.376493 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="84ce6a6f-d3d1-4ef9-8ca5-79dfa714a2b4" containerName="extract-utilities" Nov 28 11:14:45 crc kubenswrapper[4923]: I1128 11:14:45.376499 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="84ce6a6f-d3d1-4ef9-8ca5-79dfa714a2b4" containerName="extract-utilities" Nov 28 11:14:45 crc kubenswrapper[4923]: E1128 11:14:45.376506 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="39eebecc-004d-445a-ac63-fad7bc311127" containerName="extract-content" Nov 28 11:14:45 crc kubenswrapper[4923]: I1128 11:14:45.376512 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="39eebecc-004d-445a-ac63-fad7bc311127" containerName="extract-content" Nov 28 11:14:45 crc kubenswrapper[4923]: E1128 11:14:45.376522 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a6908a38-5adb-40c3-85e8-730eaa4937ef" containerName="extract-utilities" Nov 28 11:14:45 crc kubenswrapper[4923]: I1128 11:14:45.376528 4923 
state_mem.go:107] "Deleted CPUSet assignment" podUID="a6908a38-5adb-40c3-85e8-730eaa4937ef" containerName="extract-utilities" Nov 28 11:14:45 crc kubenswrapper[4923]: E1128 11:14:45.376537 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="39eebecc-004d-445a-ac63-fad7bc311127" containerName="registry-server" Nov 28 11:14:45 crc kubenswrapper[4923]: I1128 11:14:45.376543 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="39eebecc-004d-445a-ac63-fad7bc311127" containerName="registry-server" Nov 28 11:14:45 crc kubenswrapper[4923]: I1128 11:14:45.376619 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="39eebecc-004d-445a-ac63-fad7bc311127" containerName="registry-server" Nov 28 11:14:45 crc kubenswrapper[4923]: I1128 11:14:45.376627 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="bc404fb9-c265-4265-84e8-e3dd111fae9a" containerName="marketplace-operator" Nov 28 11:14:45 crc kubenswrapper[4923]: I1128 11:14:45.376635 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="84ce6a6f-d3d1-4ef9-8ca5-79dfa714a2b4" containerName="registry-server" Nov 28 11:14:45 crc kubenswrapper[4923]: I1128 11:14:45.376644 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="cffefe1d-9522-408d-aadf-c688411908e1" containerName="registry-server" Nov 28 11:14:45 crc kubenswrapper[4923]: I1128 11:14:45.376654 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="bc404fb9-c265-4265-84e8-e3dd111fae9a" containerName="marketplace-operator" Nov 28 11:14:45 crc kubenswrapper[4923]: I1128 11:14:45.376661 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="a6908a38-5adb-40c3-85e8-730eaa4937ef" containerName="registry-server" Nov 28 11:14:45 crc kubenswrapper[4923]: E1128 11:14:45.376742 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bc404fb9-c265-4265-84e8-e3dd111fae9a" containerName="marketplace-operator" Nov 28 11:14:45 crc kubenswrapper[4923]: I1128 11:14:45.376750 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="bc404fb9-c265-4265-84e8-e3dd111fae9a" containerName="marketplace-operator" Nov 28 11:14:45 crc kubenswrapper[4923]: I1128 11:14:45.377333 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-rwgwx" Nov 28 11:14:45 crc kubenswrapper[4923]: I1128 11:14:45.381218 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 28 11:14:45 crc kubenswrapper[4923]: I1128 11:14:45.389841 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-rwgwx"] Nov 28 11:14:45 crc kubenswrapper[4923]: I1128 11:14:45.533895 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/20b869c7-4aad-4ba4-a8c0-c1dbd053515c-catalog-content\") pod \"certified-operators-rwgwx\" (UID: \"20b869c7-4aad-4ba4-a8c0-c1dbd053515c\") " pod="openshift-marketplace/certified-operators-rwgwx" Nov 28 11:14:45 crc kubenswrapper[4923]: I1128 11:14:45.533963 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s4znk\" (UniqueName: \"kubernetes.io/projected/20b869c7-4aad-4ba4-a8c0-c1dbd053515c-kube-api-access-s4znk\") pod \"certified-operators-rwgwx\" (UID: \"20b869c7-4aad-4ba4-a8c0-c1dbd053515c\") " pod="openshift-marketplace/certified-operators-rwgwx" Nov 28 11:14:45 crc kubenswrapper[4923]: I1128 11:14:45.533986 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/20b869c7-4aad-4ba4-a8c0-c1dbd053515c-utilities\") pod \"certified-operators-rwgwx\" (UID: \"20b869c7-4aad-4ba4-a8c0-c1dbd053515c\") " pod="openshift-marketplace/certified-operators-rwgwx" Nov 28 11:14:45 crc kubenswrapper[4923]: I1128 11:14:45.634715 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/20b869c7-4aad-4ba4-a8c0-c1dbd053515c-catalog-content\") pod \"certified-operators-rwgwx\" (UID: \"20b869c7-4aad-4ba4-a8c0-c1dbd053515c\") " pod="openshift-marketplace/certified-operators-rwgwx" Nov 28 11:14:45 crc kubenswrapper[4923]: I1128 11:14:45.634762 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s4znk\" (UniqueName: \"kubernetes.io/projected/20b869c7-4aad-4ba4-a8c0-c1dbd053515c-kube-api-access-s4znk\") pod \"certified-operators-rwgwx\" (UID: \"20b869c7-4aad-4ba4-a8c0-c1dbd053515c\") " pod="openshift-marketplace/certified-operators-rwgwx" Nov 28 11:14:45 crc kubenswrapper[4923]: I1128 11:14:45.634783 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/20b869c7-4aad-4ba4-a8c0-c1dbd053515c-utilities\") pod \"certified-operators-rwgwx\" (UID: \"20b869c7-4aad-4ba4-a8c0-c1dbd053515c\") " pod="openshift-marketplace/certified-operators-rwgwx" Nov 28 11:14:45 crc kubenswrapper[4923]: I1128 11:14:45.635202 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/20b869c7-4aad-4ba4-a8c0-c1dbd053515c-utilities\") pod \"certified-operators-rwgwx\" (UID: \"20b869c7-4aad-4ba4-a8c0-c1dbd053515c\") " pod="openshift-marketplace/certified-operators-rwgwx" Nov 28 11:14:45 crc kubenswrapper[4923]: I1128 11:14:45.635316 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/20b869c7-4aad-4ba4-a8c0-c1dbd053515c-catalog-content\") pod \"certified-operators-rwgwx\" (UID: 
\"20b869c7-4aad-4ba4-a8c0-c1dbd053515c\") " pod="openshift-marketplace/certified-operators-rwgwx" Nov 28 11:14:45 crc kubenswrapper[4923]: I1128 11:14:45.655882 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s4znk\" (UniqueName: \"kubernetes.io/projected/20b869c7-4aad-4ba4-a8c0-c1dbd053515c-kube-api-access-s4znk\") pod \"certified-operators-rwgwx\" (UID: \"20b869c7-4aad-4ba4-a8c0-c1dbd053515c\") " pod="openshift-marketplace/certified-operators-rwgwx" Nov 28 11:14:45 crc kubenswrapper[4923]: I1128 11:14:45.696241 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-rwgwx" Nov 28 11:14:46 crc kubenswrapper[4923]: I1128 11:14:46.097577 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-rwgwx"] Nov 28 11:14:46 crc kubenswrapper[4923]: I1128 11:14:46.251915 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rwgwx" event={"ID":"20b869c7-4aad-4ba4-a8c0-c1dbd053515c","Type":"ContainerStarted","Data":"542f9cf8319d9554c332b8d0eedc9dfc216eb84c8b7f98e91404f9f1654fcaca"} Nov 28 11:14:46 crc kubenswrapper[4923]: I1128 11:14:46.374745 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-l2szt"] Nov 28 11:14:46 crc kubenswrapper[4923]: I1128 11:14:46.375985 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-l2szt" Nov 28 11:14:46 crc kubenswrapper[4923]: I1128 11:14:46.385637 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 28 11:14:46 crc kubenswrapper[4923]: I1128 11:14:46.387764 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-l2szt"] Nov 28 11:14:46 crc kubenswrapper[4923]: I1128 11:14:46.547093 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4c8bb38f-8751-4623-9098-d7a8bc8423ec-catalog-content\") pod \"community-operators-l2szt\" (UID: \"4c8bb38f-8751-4623-9098-d7a8bc8423ec\") " pod="openshift-marketplace/community-operators-l2szt" Nov 28 11:14:46 crc kubenswrapper[4923]: I1128 11:14:46.547159 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xpjtc\" (UniqueName: \"kubernetes.io/projected/4c8bb38f-8751-4623-9098-d7a8bc8423ec-kube-api-access-xpjtc\") pod \"community-operators-l2szt\" (UID: \"4c8bb38f-8751-4623-9098-d7a8bc8423ec\") " pod="openshift-marketplace/community-operators-l2szt" Nov 28 11:14:46 crc kubenswrapper[4923]: I1128 11:14:46.547193 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4c8bb38f-8751-4623-9098-d7a8bc8423ec-utilities\") pod \"community-operators-l2szt\" (UID: \"4c8bb38f-8751-4623-9098-d7a8bc8423ec\") " pod="openshift-marketplace/community-operators-l2szt" Nov 28 11:14:46 crc kubenswrapper[4923]: I1128 11:14:46.648505 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4c8bb38f-8751-4623-9098-d7a8bc8423ec-catalog-content\") pod \"community-operators-l2szt\" (UID: \"4c8bb38f-8751-4623-9098-d7a8bc8423ec\") " 
pod="openshift-marketplace/community-operators-l2szt" Nov 28 11:14:46 crc kubenswrapper[4923]: I1128 11:14:46.648576 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xpjtc\" (UniqueName: \"kubernetes.io/projected/4c8bb38f-8751-4623-9098-d7a8bc8423ec-kube-api-access-xpjtc\") pod \"community-operators-l2szt\" (UID: \"4c8bb38f-8751-4623-9098-d7a8bc8423ec\") " pod="openshift-marketplace/community-operators-l2szt" Nov 28 11:14:46 crc kubenswrapper[4923]: I1128 11:14:46.648606 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4c8bb38f-8751-4623-9098-d7a8bc8423ec-utilities\") pod \"community-operators-l2szt\" (UID: \"4c8bb38f-8751-4623-9098-d7a8bc8423ec\") " pod="openshift-marketplace/community-operators-l2szt" Nov 28 11:14:46 crc kubenswrapper[4923]: I1128 11:14:46.649018 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4c8bb38f-8751-4623-9098-d7a8bc8423ec-utilities\") pod \"community-operators-l2szt\" (UID: \"4c8bb38f-8751-4623-9098-d7a8bc8423ec\") " pod="openshift-marketplace/community-operators-l2szt" Nov 28 11:14:46 crc kubenswrapper[4923]: I1128 11:14:46.649168 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4c8bb38f-8751-4623-9098-d7a8bc8423ec-catalog-content\") pod \"community-operators-l2szt\" (UID: \"4c8bb38f-8751-4623-9098-d7a8bc8423ec\") " pod="openshift-marketplace/community-operators-l2szt" Nov 28 11:14:46 crc kubenswrapper[4923]: I1128 11:14:46.669729 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xpjtc\" (UniqueName: \"kubernetes.io/projected/4c8bb38f-8751-4623-9098-d7a8bc8423ec-kube-api-access-xpjtc\") pod \"community-operators-l2szt\" (UID: \"4c8bb38f-8751-4623-9098-d7a8bc8423ec\") " pod="openshift-marketplace/community-operators-l2szt" Nov 28 11:14:46 crc kubenswrapper[4923]: I1128 11:14:46.706095 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-l2szt" Nov 28 11:14:47 crc kubenswrapper[4923]: I1128 11:14:47.103485 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-l2szt"] Nov 28 11:14:47 crc kubenswrapper[4923]: W1128 11:14:47.107887 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4c8bb38f_8751_4623_9098_d7a8bc8423ec.slice/crio-5c94bc7158d44871807385e92df5d764e04e276e827dd5497d8d2c90fc9f4d6f WatchSource:0}: Error finding container 5c94bc7158d44871807385e92df5d764e04e276e827dd5497d8d2c90fc9f4d6f: Status 404 returned error can't find the container with id 5c94bc7158d44871807385e92df5d764e04e276e827dd5497d8d2c90fc9f4d6f Nov 28 11:14:47 crc kubenswrapper[4923]: I1128 11:14:47.258844 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l2szt" event={"ID":"4c8bb38f-8751-4623-9098-d7a8bc8423ec","Type":"ContainerStarted","Data":"5c94bc7158d44871807385e92df5d764e04e276e827dd5497d8d2c90fc9f4d6f"} Nov 28 11:14:47 crc kubenswrapper[4923]: I1128 11:14:47.262814 4923 generic.go:334] "Generic (PLEG): container finished" podID="20b869c7-4aad-4ba4-a8c0-c1dbd053515c" containerID="266aead54ee6a2ff58e3341820d4e257ac20784d4ccd6aa4cc9206d7ae542f44" exitCode=0 Nov 28 11:14:47 crc kubenswrapper[4923]: I1128 11:14:47.262903 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rwgwx" event={"ID":"20b869c7-4aad-4ba4-a8c0-c1dbd053515c","Type":"ContainerDied","Data":"266aead54ee6a2ff58e3341820d4e257ac20784d4ccd6aa4cc9206d7ae542f44"} Nov 28 11:14:47 crc kubenswrapper[4923]: I1128 11:14:47.461524 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-mkn9p" Nov 28 11:14:47 crc kubenswrapper[4923]: I1128 11:14:47.508651 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-g855d"] Nov 28 11:14:47 crc kubenswrapper[4923]: I1128 11:14:47.780032 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-sb2nx"] Nov 28 11:14:47 crc kubenswrapper[4923]: I1128 11:14:47.781123 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-sb2nx" Nov 28 11:14:47 crc kubenswrapper[4923]: I1128 11:14:47.784736 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 28 11:14:47 crc kubenswrapper[4923]: I1128 11:14:47.806238 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-sb2nx"] Nov 28 11:14:47 crc kubenswrapper[4923]: I1128 11:14:47.867382 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c2b69365-6bbb-41c8-afba-9d95f8f80d48-utilities\") pod \"redhat-marketplace-sb2nx\" (UID: \"c2b69365-6bbb-41c8-afba-9d95f8f80d48\") " pod="openshift-marketplace/redhat-marketplace-sb2nx" Nov 28 11:14:47 crc kubenswrapper[4923]: I1128 11:14:47.867501 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9nrwz\" (UniqueName: \"kubernetes.io/projected/c2b69365-6bbb-41c8-afba-9d95f8f80d48-kube-api-access-9nrwz\") pod \"redhat-marketplace-sb2nx\" (UID: \"c2b69365-6bbb-41c8-afba-9d95f8f80d48\") " pod="openshift-marketplace/redhat-marketplace-sb2nx" Nov 28 11:14:47 crc kubenswrapper[4923]: I1128 11:14:47.867522 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c2b69365-6bbb-41c8-afba-9d95f8f80d48-catalog-content\") pod \"redhat-marketplace-sb2nx\" (UID: \"c2b69365-6bbb-41c8-afba-9d95f8f80d48\") " pod="openshift-marketplace/redhat-marketplace-sb2nx" Nov 28 11:14:47 crc kubenswrapper[4923]: I1128 11:14:47.969048 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c2b69365-6bbb-41c8-afba-9d95f8f80d48-utilities\") pod \"redhat-marketplace-sb2nx\" (UID: \"c2b69365-6bbb-41c8-afba-9d95f8f80d48\") " pod="openshift-marketplace/redhat-marketplace-sb2nx" Nov 28 11:14:47 crc kubenswrapper[4923]: I1128 11:14:47.969137 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9nrwz\" (UniqueName: \"kubernetes.io/projected/c2b69365-6bbb-41c8-afba-9d95f8f80d48-kube-api-access-9nrwz\") pod \"redhat-marketplace-sb2nx\" (UID: \"c2b69365-6bbb-41c8-afba-9d95f8f80d48\") " pod="openshift-marketplace/redhat-marketplace-sb2nx" Nov 28 11:14:47 crc kubenswrapper[4923]: I1128 11:14:47.969157 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c2b69365-6bbb-41c8-afba-9d95f8f80d48-catalog-content\") pod \"redhat-marketplace-sb2nx\" (UID: \"c2b69365-6bbb-41c8-afba-9d95f8f80d48\") " pod="openshift-marketplace/redhat-marketplace-sb2nx" Nov 28 11:14:47 crc kubenswrapper[4923]: I1128 11:14:47.969490 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c2b69365-6bbb-41c8-afba-9d95f8f80d48-utilities\") pod \"redhat-marketplace-sb2nx\" (UID: \"c2b69365-6bbb-41c8-afba-9d95f8f80d48\") " pod="openshift-marketplace/redhat-marketplace-sb2nx" Nov 28 11:14:47 crc kubenswrapper[4923]: I1128 11:14:47.969528 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c2b69365-6bbb-41c8-afba-9d95f8f80d48-catalog-content\") pod \"redhat-marketplace-sb2nx\" (UID: 
\"c2b69365-6bbb-41c8-afba-9d95f8f80d48\") " pod="openshift-marketplace/redhat-marketplace-sb2nx" Nov 28 11:14:47 crc kubenswrapper[4923]: I1128 11:14:47.986069 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9nrwz\" (UniqueName: \"kubernetes.io/projected/c2b69365-6bbb-41c8-afba-9d95f8f80d48-kube-api-access-9nrwz\") pod \"redhat-marketplace-sb2nx\" (UID: \"c2b69365-6bbb-41c8-afba-9d95f8f80d48\") " pod="openshift-marketplace/redhat-marketplace-sb2nx" Nov 28 11:14:48 crc kubenswrapper[4923]: I1128 11:14:48.096974 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-sb2nx" Nov 28 11:14:48 crc kubenswrapper[4923]: I1128 11:14:48.269808 4923 generic.go:334] "Generic (PLEG): container finished" podID="20b869c7-4aad-4ba4-a8c0-c1dbd053515c" containerID="e0a18e6eed5bd56986c9d2fe1e8fb0d944328a3b491a0bd824cf1ef6412e6396" exitCode=0 Nov 28 11:14:48 crc kubenswrapper[4923]: I1128 11:14:48.270016 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rwgwx" event={"ID":"20b869c7-4aad-4ba4-a8c0-c1dbd053515c","Type":"ContainerDied","Data":"e0a18e6eed5bd56986c9d2fe1e8fb0d944328a3b491a0bd824cf1ef6412e6396"} Nov 28 11:14:48 crc kubenswrapper[4923]: I1128 11:14:48.273199 4923 generic.go:334] "Generic (PLEG): container finished" podID="4c8bb38f-8751-4623-9098-d7a8bc8423ec" containerID="eee95fb3111a96790e78816d7a859e2600d88318d032d8c1cffa2949d7c98f64" exitCode=0 Nov 28 11:14:48 crc kubenswrapper[4923]: I1128 11:14:48.273239 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l2szt" event={"ID":"4c8bb38f-8751-4623-9098-d7a8bc8423ec","Type":"ContainerDied","Data":"eee95fb3111a96790e78816d7a859e2600d88318d032d8c1cffa2949d7c98f64"} Nov 28 11:14:48 crc kubenswrapper[4923]: I1128 11:14:48.511398 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-sb2nx"] Nov 28 11:14:48 crc kubenswrapper[4923]: W1128 11:14:48.519180 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc2b69365_6bbb_41c8_afba_9d95f8f80d48.slice/crio-6dbabcfdb9e489e08adb452bc74c86ea5f5914cecee8e9557e237d541ea128e1 WatchSource:0}: Error finding container 6dbabcfdb9e489e08adb452bc74c86ea5f5914cecee8e9557e237d541ea128e1: Status 404 returned error can't find the container with id 6dbabcfdb9e489e08adb452bc74c86ea5f5914cecee8e9557e237d541ea128e1 Nov 28 11:14:48 crc kubenswrapper[4923]: I1128 11:14:48.778540 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-l2nw6"] Nov 28 11:14:48 crc kubenswrapper[4923]: I1128 11:14:48.781149 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-l2nw6" Nov 28 11:14:48 crc kubenswrapper[4923]: I1128 11:14:48.783448 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Nov 28 11:14:48 crc kubenswrapper[4923]: I1128 11:14:48.793912 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-l2nw6"] Nov 28 11:14:48 crc kubenswrapper[4923]: I1128 11:14:48.878730 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k2h4k\" (UniqueName: \"kubernetes.io/projected/4a7b435f-bbd9-4ca6-8d5d-d411879c0b0c-kube-api-access-k2h4k\") pod \"redhat-operators-l2nw6\" (UID: \"4a7b435f-bbd9-4ca6-8d5d-d411879c0b0c\") " pod="openshift-marketplace/redhat-operators-l2nw6" Nov 28 11:14:48 crc kubenswrapper[4923]: I1128 11:14:48.878807 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4a7b435f-bbd9-4ca6-8d5d-d411879c0b0c-utilities\") pod \"redhat-operators-l2nw6\" (UID: \"4a7b435f-bbd9-4ca6-8d5d-d411879c0b0c\") " pod="openshift-marketplace/redhat-operators-l2nw6" Nov 28 11:14:48 crc kubenswrapper[4923]: I1128 11:14:48.878910 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4a7b435f-bbd9-4ca6-8d5d-d411879c0b0c-catalog-content\") pod \"redhat-operators-l2nw6\" (UID: \"4a7b435f-bbd9-4ca6-8d5d-d411879c0b0c\") " pod="openshift-marketplace/redhat-operators-l2nw6" Nov 28 11:14:48 crc kubenswrapper[4923]: I1128 11:14:48.980460 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4a7b435f-bbd9-4ca6-8d5d-d411879c0b0c-catalog-content\") pod \"redhat-operators-l2nw6\" (UID: \"4a7b435f-bbd9-4ca6-8d5d-d411879c0b0c\") " pod="openshift-marketplace/redhat-operators-l2nw6" Nov 28 11:14:48 crc kubenswrapper[4923]: I1128 11:14:48.980512 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k2h4k\" (UniqueName: \"kubernetes.io/projected/4a7b435f-bbd9-4ca6-8d5d-d411879c0b0c-kube-api-access-k2h4k\") pod \"redhat-operators-l2nw6\" (UID: \"4a7b435f-bbd9-4ca6-8d5d-d411879c0b0c\") " pod="openshift-marketplace/redhat-operators-l2nw6" Nov 28 11:14:48 crc kubenswrapper[4923]: I1128 11:14:48.980540 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4a7b435f-bbd9-4ca6-8d5d-d411879c0b0c-utilities\") pod \"redhat-operators-l2nw6\" (UID: \"4a7b435f-bbd9-4ca6-8d5d-d411879c0b0c\") " pod="openshift-marketplace/redhat-operators-l2nw6" Nov 28 11:14:48 crc kubenswrapper[4923]: I1128 11:14:48.981157 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4a7b435f-bbd9-4ca6-8d5d-d411879c0b0c-catalog-content\") pod \"redhat-operators-l2nw6\" (UID: \"4a7b435f-bbd9-4ca6-8d5d-d411879c0b0c\") " pod="openshift-marketplace/redhat-operators-l2nw6" Nov 28 11:14:48 crc kubenswrapper[4923]: I1128 11:14:48.981165 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4a7b435f-bbd9-4ca6-8d5d-d411879c0b0c-utilities\") pod \"redhat-operators-l2nw6\" (UID: \"4a7b435f-bbd9-4ca6-8d5d-d411879c0b0c\") " 
pod="openshift-marketplace/redhat-operators-l2nw6" Nov 28 11:14:49 crc kubenswrapper[4923]: I1128 11:14:48.999779 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k2h4k\" (UniqueName: \"kubernetes.io/projected/4a7b435f-bbd9-4ca6-8d5d-d411879c0b0c-kube-api-access-k2h4k\") pod \"redhat-operators-l2nw6\" (UID: \"4a7b435f-bbd9-4ca6-8d5d-d411879c0b0c\") " pod="openshift-marketplace/redhat-operators-l2nw6" Nov 28 11:14:49 crc kubenswrapper[4923]: I1128 11:14:49.113605 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-l2nw6" Nov 28 11:14:49 crc kubenswrapper[4923]: I1128 11:14:49.281058 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rwgwx" event={"ID":"20b869c7-4aad-4ba4-a8c0-c1dbd053515c","Type":"ContainerStarted","Data":"c51e5601a8a39d168a86ec1b7658acf15c518a64648a3fb20ee3e8f2965e4004"} Nov 28 11:14:49 crc kubenswrapper[4923]: I1128 11:14:49.282522 4923 generic.go:334] "Generic (PLEG): container finished" podID="c2b69365-6bbb-41c8-afba-9d95f8f80d48" containerID="348fcd69c5dafe0e4c6cddcdf8ef85970a2e8eedc016902b4210dce562b6e53b" exitCode=0 Nov 28 11:14:49 crc kubenswrapper[4923]: I1128 11:14:49.282574 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sb2nx" event={"ID":"c2b69365-6bbb-41c8-afba-9d95f8f80d48","Type":"ContainerDied","Data":"348fcd69c5dafe0e4c6cddcdf8ef85970a2e8eedc016902b4210dce562b6e53b"} Nov 28 11:14:49 crc kubenswrapper[4923]: I1128 11:14:49.282622 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sb2nx" event={"ID":"c2b69365-6bbb-41c8-afba-9d95f8f80d48","Type":"ContainerStarted","Data":"6dbabcfdb9e489e08adb452bc74c86ea5f5914cecee8e9557e237d541ea128e1"} Nov 28 11:14:49 crc kubenswrapper[4923]: I1128 11:14:49.294512 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l2szt" event={"ID":"4c8bb38f-8751-4623-9098-d7a8bc8423ec","Type":"ContainerStarted","Data":"d2360e40180270624c83e53344cab72becbe820ac260198a1f88b9c78f9611dd"} Nov 28 11:14:49 crc kubenswrapper[4923]: I1128 11:14:49.321619 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-rwgwx" podStartSLOduration=2.877075433 podStartE2EDuration="4.32160201s" podCreationTimestamp="2025-11-28 11:14:45 +0000 UTC" firstStartedPulling="2025-11-28 11:14:47.264247267 +0000 UTC m=+366.392931477" lastFinishedPulling="2025-11-28 11:14:48.708773854 +0000 UTC m=+367.837458054" observedRunningTime="2025-11-28 11:14:49.317163034 +0000 UTC m=+368.445847244" watchObservedRunningTime="2025-11-28 11:14:49.32160201 +0000 UTC m=+368.450286210" Nov 28 11:14:49 crc kubenswrapper[4923]: I1128 11:14:49.321983 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-l2nw6"] Nov 28 11:14:49 crc kubenswrapper[4923]: W1128 11:14:49.352572 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4a7b435f_bbd9_4ca6_8d5d_d411879c0b0c.slice/crio-6fc4ef015d98d6b60baf3cb6413450d8757922ee25ed24b7904e6207c7d4573d WatchSource:0}: Error finding container 6fc4ef015d98d6b60baf3cb6413450d8757922ee25ed24b7904e6207c7d4573d: Status 404 returned error can't find the container with id 6fc4ef015d98d6b60baf3cb6413450d8757922ee25ed24b7904e6207c7d4573d Nov 28 11:14:50 crc 
kubenswrapper[4923]: I1128 11:14:50.301381 4923 generic.go:334] "Generic (PLEG): container finished" podID="4a7b435f-bbd9-4ca6-8d5d-d411879c0b0c" containerID="700fa7dfe26e5a9ba9f3d295eee57758980f9f0caae7ba4cd5868239218db644" exitCode=0 Nov 28 11:14:50 crc kubenswrapper[4923]: I1128 11:14:50.302066 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-l2nw6" event={"ID":"4a7b435f-bbd9-4ca6-8d5d-d411879c0b0c","Type":"ContainerDied","Data":"700fa7dfe26e5a9ba9f3d295eee57758980f9f0caae7ba4cd5868239218db644"} Nov 28 11:14:50 crc kubenswrapper[4923]: I1128 11:14:50.302125 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-l2nw6" event={"ID":"4a7b435f-bbd9-4ca6-8d5d-d411879c0b0c","Type":"ContainerStarted","Data":"6fc4ef015d98d6b60baf3cb6413450d8757922ee25ed24b7904e6207c7d4573d"} Nov 28 11:14:50 crc kubenswrapper[4923]: I1128 11:14:50.307672 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sb2nx" event={"ID":"c2b69365-6bbb-41c8-afba-9d95f8f80d48","Type":"ContainerStarted","Data":"5fbc28a0f99d6b619aa9cb2d2f23038e846f32bab1ec5521b76a6515fbab33ba"} Nov 28 11:14:50 crc kubenswrapper[4923]: I1128 11:14:50.310234 4923 generic.go:334] "Generic (PLEG): container finished" podID="4c8bb38f-8751-4623-9098-d7a8bc8423ec" containerID="d2360e40180270624c83e53344cab72becbe820ac260198a1f88b9c78f9611dd" exitCode=0 Nov 28 11:14:50 crc kubenswrapper[4923]: I1128 11:14:50.310320 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l2szt" event={"ID":"4c8bb38f-8751-4623-9098-d7a8bc8423ec","Type":"ContainerDied","Data":"d2360e40180270624c83e53344cab72becbe820ac260198a1f88b9c78f9611dd"} Nov 28 11:14:51 crc kubenswrapper[4923]: I1128 11:14:51.334713 4923 generic.go:334] "Generic (PLEG): container finished" podID="c2b69365-6bbb-41c8-afba-9d95f8f80d48" containerID="5fbc28a0f99d6b619aa9cb2d2f23038e846f32bab1ec5521b76a6515fbab33ba" exitCode=0 Nov 28 11:14:51 crc kubenswrapper[4923]: I1128 11:14:51.334754 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sb2nx" event={"ID":"c2b69365-6bbb-41c8-afba-9d95f8f80d48","Type":"ContainerDied","Data":"5fbc28a0f99d6b619aa9cb2d2f23038e846f32bab1ec5521b76a6515fbab33ba"} Nov 28 11:14:51 crc kubenswrapper[4923]: I1128 11:14:51.338294 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l2szt" event={"ID":"4c8bb38f-8751-4623-9098-d7a8bc8423ec","Type":"ContainerStarted","Data":"d77404da401ee8cbd029c1fd00bd884e3d3a25fec3b87ae137f77561cadbb4ea"} Nov 28 11:14:51 crc kubenswrapper[4923]: I1128 11:14:51.379672 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-l2szt" podStartSLOduration=2.928562354 podStartE2EDuration="5.379636161s" podCreationTimestamp="2025-11-28 11:14:46 +0000 UTC" firstStartedPulling="2025-11-28 11:14:48.276394648 +0000 UTC m=+367.405078858" lastFinishedPulling="2025-11-28 11:14:50.727468455 +0000 UTC m=+369.856152665" observedRunningTime="2025-11-28 11:14:51.376585825 +0000 UTC m=+370.505270045" watchObservedRunningTime="2025-11-28 11:14:51.379636161 +0000 UTC m=+370.508320371" Nov 28 11:14:54 crc kubenswrapper[4923]: I1128 11:14:54.357360 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sb2nx" 
event={"ID":"c2b69365-6bbb-41c8-afba-9d95f8f80d48","Type":"ContainerStarted","Data":"529d328fd67bda38653f233466d3059d6565a62561a7e26b856d530f4d6eec21"} Nov 28 11:14:54 crc kubenswrapper[4923]: I1128 11:14:54.360085 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-l2nw6" event={"ID":"4a7b435f-bbd9-4ca6-8d5d-d411879c0b0c","Type":"ContainerDied","Data":"4fbae97adbe8cff4e9b58c21f3dee0cbd6187ffc90196cbb7cd0ce7a9bf0e809"} Nov 28 11:14:54 crc kubenswrapper[4923]: I1128 11:14:54.360094 4923 generic.go:334] "Generic (PLEG): container finished" podID="4a7b435f-bbd9-4ca6-8d5d-d411879c0b0c" containerID="4fbae97adbe8cff4e9b58c21f3dee0cbd6187ffc90196cbb7cd0ce7a9bf0e809" exitCode=0 Nov 28 11:14:54 crc kubenswrapper[4923]: I1128 11:14:54.375976 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-sb2nx" podStartSLOduration=4.359474838 podStartE2EDuration="7.37595837s" podCreationTimestamp="2025-11-28 11:14:47 +0000 UTC" firstStartedPulling="2025-11-28 11:14:49.290808513 +0000 UTC m=+368.419492723" lastFinishedPulling="2025-11-28 11:14:52.307292045 +0000 UTC m=+371.435976255" observedRunningTime="2025-11-28 11:14:54.371871203 +0000 UTC m=+373.500555433" watchObservedRunningTime="2025-11-28 11:14:54.37595837 +0000 UTC m=+373.504642610" Nov 28 11:14:55 crc kubenswrapper[4923]: I1128 11:14:55.368583 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-l2nw6" event={"ID":"4a7b435f-bbd9-4ca6-8d5d-d411879c0b0c","Type":"ContainerStarted","Data":"59163bb552586a2571fa88c9e7e4305bec16e1b09c787de0513afad706c45363"} Nov 28 11:14:55 crc kubenswrapper[4923]: I1128 11:14:55.390827 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-l2nw6" podStartSLOduration=2.75289012 podStartE2EDuration="7.390813657s" podCreationTimestamp="2025-11-28 11:14:48 +0000 UTC" firstStartedPulling="2025-11-28 11:14:50.306288549 +0000 UTC m=+369.434972759" lastFinishedPulling="2025-11-28 11:14:54.944212086 +0000 UTC m=+374.072896296" observedRunningTime="2025-11-28 11:14:55.390372035 +0000 UTC m=+374.519056275" watchObservedRunningTime="2025-11-28 11:14:55.390813657 +0000 UTC m=+374.519497867" Nov 28 11:14:55 crc kubenswrapper[4923]: I1128 11:14:55.696568 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-rwgwx" Nov 28 11:14:55 crc kubenswrapper[4923]: I1128 11:14:55.696946 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-rwgwx" Nov 28 11:14:55 crc kubenswrapper[4923]: I1128 11:14:55.753599 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-rwgwx" Nov 28 11:14:56 crc kubenswrapper[4923]: I1128 11:14:56.417179 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-rwgwx" Nov 28 11:14:56 crc kubenswrapper[4923]: I1128 11:14:56.706442 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-l2szt" Nov 28 11:14:56 crc kubenswrapper[4923]: I1128 11:14:56.706489 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-l2szt" Nov 28 11:14:56 crc kubenswrapper[4923]: I1128 11:14:56.771646 4923 kubelet.go:2542] "SyncLoop (probe)" 
probe="startup" status="started" pod="openshift-marketplace/community-operators-l2szt" Nov 28 11:14:57 crc kubenswrapper[4923]: I1128 11:14:57.430814 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-l2szt" Nov 28 11:14:58 crc kubenswrapper[4923]: I1128 11:14:58.097801 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-sb2nx" Nov 28 11:14:58 crc kubenswrapper[4923]: I1128 11:14:58.097886 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-sb2nx" Nov 28 11:14:58 crc kubenswrapper[4923]: I1128 11:14:58.155034 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-sb2nx" Nov 28 11:14:58 crc kubenswrapper[4923]: I1128 11:14:58.430383 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-sb2nx" Nov 28 11:14:59 crc kubenswrapper[4923]: I1128 11:14:59.113785 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-l2nw6" Nov 28 11:14:59 crc kubenswrapper[4923]: I1128 11:14:59.114667 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-l2nw6" Nov 28 11:15:00 crc kubenswrapper[4923]: I1128 11:15:00.155630 4923 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-l2nw6" podUID="4a7b435f-bbd9-4ca6-8d5d-d411879c0b0c" containerName="registry-server" probeResult="failure" output=< Nov 28 11:15:00 crc kubenswrapper[4923]: timeout: failed to connect service ":50051" within 1s Nov 28 11:15:00 crc kubenswrapper[4923]: > Nov 28 11:15:00 crc kubenswrapper[4923]: I1128 11:15:00.198081 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405475-s8h4v"] Nov 28 11:15:00 crc kubenswrapper[4923]: I1128 11:15:00.198873 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405475-s8h4v" Nov 28 11:15:00 crc kubenswrapper[4923]: I1128 11:15:00.200732 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 28 11:15:00 crc kubenswrapper[4923]: I1128 11:15:00.201013 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 28 11:15:00 crc kubenswrapper[4923]: I1128 11:15:00.210225 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405475-s8h4v"] Nov 28 11:15:00 crc kubenswrapper[4923]: I1128 11:15:00.346887 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2vvjv\" (UniqueName: \"kubernetes.io/projected/5f35ec69-f377-4418-999d-0089c8362310-kube-api-access-2vvjv\") pod \"collect-profiles-29405475-s8h4v\" (UID: \"5f35ec69-f377-4418-999d-0089c8362310\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405475-s8h4v" Nov 28 11:15:00 crc kubenswrapper[4923]: I1128 11:15:00.346950 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5f35ec69-f377-4418-999d-0089c8362310-config-volume\") pod \"collect-profiles-29405475-s8h4v\" (UID: \"5f35ec69-f377-4418-999d-0089c8362310\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405475-s8h4v" Nov 28 11:15:00 crc kubenswrapper[4923]: I1128 11:15:00.346993 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5f35ec69-f377-4418-999d-0089c8362310-secret-volume\") pod \"collect-profiles-29405475-s8h4v\" (UID: \"5f35ec69-f377-4418-999d-0089c8362310\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405475-s8h4v" Nov 28 11:15:00 crc kubenswrapper[4923]: I1128 11:15:00.448473 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2vvjv\" (UniqueName: \"kubernetes.io/projected/5f35ec69-f377-4418-999d-0089c8362310-kube-api-access-2vvjv\") pod \"collect-profiles-29405475-s8h4v\" (UID: \"5f35ec69-f377-4418-999d-0089c8362310\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405475-s8h4v" Nov 28 11:15:00 crc kubenswrapper[4923]: I1128 11:15:00.448513 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5f35ec69-f377-4418-999d-0089c8362310-config-volume\") pod \"collect-profiles-29405475-s8h4v\" (UID: \"5f35ec69-f377-4418-999d-0089c8362310\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405475-s8h4v" Nov 28 11:15:00 crc kubenswrapper[4923]: I1128 11:15:00.448547 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5f35ec69-f377-4418-999d-0089c8362310-secret-volume\") pod \"collect-profiles-29405475-s8h4v\" (UID: \"5f35ec69-f377-4418-999d-0089c8362310\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405475-s8h4v" Nov 28 11:15:00 crc kubenswrapper[4923]: I1128 11:15:00.449718 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5f35ec69-f377-4418-999d-0089c8362310-config-volume\") pod 
\"collect-profiles-29405475-s8h4v\" (UID: \"5f35ec69-f377-4418-999d-0089c8362310\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405475-s8h4v" Nov 28 11:15:00 crc kubenswrapper[4923]: I1128 11:15:00.453778 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5f35ec69-f377-4418-999d-0089c8362310-secret-volume\") pod \"collect-profiles-29405475-s8h4v\" (UID: \"5f35ec69-f377-4418-999d-0089c8362310\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405475-s8h4v" Nov 28 11:15:00 crc kubenswrapper[4923]: I1128 11:15:00.465380 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2vvjv\" (UniqueName: \"kubernetes.io/projected/5f35ec69-f377-4418-999d-0089c8362310-kube-api-access-2vvjv\") pod \"collect-profiles-29405475-s8h4v\" (UID: \"5f35ec69-f377-4418-999d-0089c8362310\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405475-s8h4v" Nov 28 11:15:00 crc kubenswrapper[4923]: I1128 11:15:00.511518 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405475-s8h4v" Nov 28 11:15:00 crc kubenswrapper[4923]: I1128 11:15:00.763537 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405475-s8h4v"] Nov 28 11:15:00 crc kubenswrapper[4923]: W1128 11:15:00.771012 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5f35ec69_f377_4418_999d_0089c8362310.slice/crio-0b751f576370b16ad8312e92878d72ae55bb2aee546caaceed17dc1707b0e08d WatchSource:0}: Error finding container 0b751f576370b16ad8312e92878d72ae55bb2aee546caaceed17dc1707b0e08d: Status 404 returned error can't find the container with id 0b751f576370b16ad8312e92878d72ae55bb2aee546caaceed17dc1707b0e08d Nov 28 11:15:01 crc kubenswrapper[4923]: I1128 11:15:01.398018 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405475-s8h4v" event={"ID":"5f35ec69-f377-4418-999d-0089c8362310","Type":"ContainerStarted","Data":"0b751f576370b16ad8312e92878d72ae55bb2aee546caaceed17dc1707b0e08d"} Nov 28 11:15:03 crc kubenswrapper[4923]: I1128 11:15:03.409166 4923 generic.go:334] "Generic (PLEG): container finished" podID="5f35ec69-f377-4418-999d-0089c8362310" containerID="4d41f6ca9fe94d94089fef4435b4d70f17b86a69a9d60abc43649bf4580846c3" exitCode=0 Nov 28 11:15:03 crc kubenswrapper[4923]: I1128 11:15:03.409225 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405475-s8h4v" event={"ID":"5f35ec69-f377-4418-999d-0089c8362310","Type":"ContainerDied","Data":"4d41f6ca9fe94d94089fef4435b4d70f17b86a69a9d60abc43649bf4580846c3"} Nov 28 11:15:04 crc kubenswrapper[4923]: I1128 11:15:04.627308 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405475-s8h4v" Nov 28 11:15:04 crc kubenswrapper[4923]: I1128 11:15:04.711593 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5f35ec69-f377-4418-999d-0089c8362310-secret-volume\") pod \"5f35ec69-f377-4418-999d-0089c8362310\" (UID: \"5f35ec69-f377-4418-999d-0089c8362310\") " Nov 28 11:15:04 crc kubenswrapper[4923]: I1128 11:15:04.711658 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5f35ec69-f377-4418-999d-0089c8362310-config-volume\") pod \"5f35ec69-f377-4418-999d-0089c8362310\" (UID: \"5f35ec69-f377-4418-999d-0089c8362310\") " Nov 28 11:15:04 crc kubenswrapper[4923]: I1128 11:15:04.711692 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2vvjv\" (UniqueName: \"kubernetes.io/projected/5f35ec69-f377-4418-999d-0089c8362310-kube-api-access-2vvjv\") pod \"5f35ec69-f377-4418-999d-0089c8362310\" (UID: \"5f35ec69-f377-4418-999d-0089c8362310\") " Nov 28 11:15:04 crc kubenswrapper[4923]: I1128 11:15:04.713486 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5f35ec69-f377-4418-999d-0089c8362310-config-volume" (OuterVolumeSpecName: "config-volume") pod "5f35ec69-f377-4418-999d-0089c8362310" (UID: "5f35ec69-f377-4418-999d-0089c8362310"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:15:04 crc kubenswrapper[4923]: I1128 11:15:04.722884 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5f35ec69-f377-4418-999d-0089c8362310-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "5f35ec69-f377-4418-999d-0089c8362310" (UID: "5f35ec69-f377-4418-999d-0089c8362310"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:15:04 crc kubenswrapper[4923]: I1128 11:15:04.727087 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5f35ec69-f377-4418-999d-0089c8362310-kube-api-access-2vvjv" (OuterVolumeSpecName: "kube-api-access-2vvjv") pod "5f35ec69-f377-4418-999d-0089c8362310" (UID: "5f35ec69-f377-4418-999d-0089c8362310"). InnerVolumeSpecName "kube-api-access-2vvjv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:15:04 crc kubenswrapper[4923]: I1128 11:15:04.813197 4923 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5f35ec69-f377-4418-999d-0089c8362310-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 28 11:15:04 crc kubenswrapper[4923]: I1128 11:15:04.813226 4923 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5f35ec69-f377-4418-999d-0089c8362310-config-volume\") on node \"crc\" DevicePath \"\"" Nov 28 11:15:04 crc kubenswrapper[4923]: I1128 11:15:04.813235 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2vvjv\" (UniqueName: \"kubernetes.io/projected/5f35ec69-f377-4418-999d-0089c8362310-kube-api-access-2vvjv\") on node \"crc\" DevicePath \"\"" Nov 28 11:15:05 crc kubenswrapper[4923]: I1128 11:15:05.424873 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405475-s8h4v" event={"ID":"5f35ec69-f377-4418-999d-0089c8362310","Type":"ContainerDied","Data":"0b751f576370b16ad8312e92878d72ae55bb2aee546caaceed17dc1707b0e08d"} Nov 28 11:15:05 crc kubenswrapper[4923]: I1128 11:15:05.424916 4923 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0b751f576370b16ad8312e92878d72ae55bb2aee546caaceed17dc1707b0e08d" Nov 28 11:15:05 crc kubenswrapper[4923]: I1128 11:15:05.425027 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405475-s8h4v" Nov 28 11:15:09 crc kubenswrapper[4923]: I1128 11:15:09.197747 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-l2nw6" Nov 28 11:15:09 crc kubenswrapper[4923]: I1128 11:15:09.240326 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-l2nw6" Nov 28 11:15:12 crc kubenswrapper[4923]: I1128 11:15:12.552510 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-g855d" podUID="6f176857-50d2-41c7-8237-961e330c629d" containerName="registry" containerID="cri-o://b5b5789b67b9671ae9e00dcfa57e90c7fa0e451e97bfde57dbba3c68139d4ec3" gracePeriod=30 Nov 28 11:15:13 crc kubenswrapper[4923]: I1128 11:15:13.015624 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-g855d" Nov 28 11:15:13 crc kubenswrapper[4923]: I1128 11:15:13.147338 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tmgnv\" (UniqueName: \"kubernetes.io/projected/6f176857-50d2-41c7-8237-961e330c629d-kube-api-access-tmgnv\") pod \"6f176857-50d2-41c7-8237-961e330c629d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " Nov 28 11:15:13 crc kubenswrapper[4923]: I1128 11:15:13.147417 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/6f176857-50d2-41c7-8237-961e330c629d-registry-tls\") pod \"6f176857-50d2-41c7-8237-961e330c629d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " Nov 28 11:15:13 crc kubenswrapper[4923]: I1128 11:15:13.147468 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/6f176857-50d2-41c7-8237-961e330c629d-ca-trust-extracted\") pod \"6f176857-50d2-41c7-8237-961e330c629d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " Nov 28 11:15:13 crc kubenswrapper[4923]: I1128 11:15:13.147553 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/6f176857-50d2-41c7-8237-961e330c629d-bound-sa-token\") pod \"6f176857-50d2-41c7-8237-961e330c629d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " Nov 28 11:15:13 crc kubenswrapper[4923]: I1128 11:15:13.147596 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/6f176857-50d2-41c7-8237-961e330c629d-registry-certificates\") pod \"6f176857-50d2-41c7-8237-961e330c629d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " Nov 28 11:15:13 crc kubenswrapper[4923]: I1128 11:15:13.147633 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6f176857-50d2-41c7-8237-961e330c629d-trusted-ca\") pod \"6f176857-50d2-41c7-8237-961e330c629d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " Nov 28 11:15:13 crc kubenswrapper[4923]: I1128 11:15:13.147922 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"6f176857-50d2-41c7-8237-961e330c629d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " Nov 28 11:15:13 crc kubenswrapper[4923]: I1128 11:15:13.148017 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/6f176857-50d2-41c7-8237-961e330c629d-installation-pull-secrets\") pod \"6f176857-50d2-41c7-8237-961e330c629d\" (UID: \"6f176857-50d2-41c7-8237-961e330c629d\") " Nov 28 11:15:13 crc kubenswrapper[4923]: I1128 11:15:13.152064 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6f176857-50d2-41c7-8237-961e330c629d-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "6f176857-50d2-41c7-8237-961e330c629d" (UID: "6f176857-50d2-41c7-8237-961e330c629d"). InnerVolumeSpecName "registry-certificates". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:15:13 crc kubenswrapper[4923]: I1128 11:15:13.153234 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6f176857-50d2-41c7-8237-961e330c629d-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "6f176857-50d2-41c7-8237-961e330c629d" (UID: "6f176857-50d2-41c7-8237-961e330c629d"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:15:13 crc kubenswrapper[4923]: I1128 11:15:13.158945 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6f176857-50d2-41c7-8237-961e330c629d-kube-api-access-tmgnv" (OuterVolumeSpecName: "kube-api-access-tmgnv") pod "6f176857-50d2-41c7-8237-961e330c629d" (UID: "6f176857-50d2-41c7-8237-961e330c629d"). InnerVolumeSpecName "kube-api-access-tmgnv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:15:13 crc kubenswrapper[4923]: I1128 11:15:13.159649 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6f176857-50d2-41c7-8237-961e330c629d-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "6f176857-50d2-41c7-8237-961e330c629d" (UID: "6f176857-50d2-41c7-8237-961e330c629d"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:15:13 crc kubenswrapper[4923]: I1128 11:15:13.161025 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6f176857-50d2-41c7-8237-961e330c629d-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "6f176857-50d2-41c7-8237-961e330c629d" (UID: "6f176857-50d2-41c7-8237-961e330c629d"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:15:13 crc kubenswrapper[4923]: I1128 11:15:13.161343 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6f176857-50d2-41c7-8237-961e330c629d-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "6f176857-50d2-41c7-8237-961e330c629d" (UID: "6f176857-50d2-41c7-8237-961e330c629d"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:15:13 crc kubenswrapper[4923]: I1128 11:15:13.162533 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "6f176857-50d2-41c7-8237-961e330c629d" (UID: "6f176857-50d2-41c7-8237-961e330c629d"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 28 11:15:13 crc kubenswrapper[4923]: I1128 11:15:13.184148 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6f176857-50d2-41c7-8237-961e330c629d-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "6f176857-50d2-41c7-8237-961e330c629d" (UID: "6f176857-50d2-41c7-8237-961e330c629d"). InnerVolumeSpecName "ca-trust-extracted". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:15:13 crc kubenswrapper[4923]: I1128 11:15:13.249736 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tmgnv\" (UniqueName: \"kubernetes.io/projected/6f176857-50d2-41c7-8237-961e330c629d-kube-api-access-tmgnv\") on node \"crc\" DevicePath \"\"" Nov 28 11:15:13 crc kubenswrapper[4923]: I1128 11:15:13.249812 4923 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/6f176857-50d2-41c7-8237-961e330c629d-registry-tls\") on node \"crc\" DevicePath \"\"" Nov 28 11:15:13 crc kubenswrapper[4923]: I1128 11:15:13.249831 4923 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/6f176857-50d2-41c7-8237-961e330c629d-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Nov 28 11:15:13 crc kubenswrapper[4923]: I1128 11:15:13.249848 4923 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/6f176857-50d2-41c7-8237-961e330c629d-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 28 11:15:13 crc kubenswrapper[4923]: I1128 11:15:13.249865 4923 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/6f176857-50d2-41c7-8237-961e330c629d-registry-certificates\") on node \"crc\" DevicePath \"\"" Nov 28 11:15:13 crc kubenswrapper[4923]: I1128 11:15:13.249881 4923 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6f176857-50d2-41c7-8237-961e330c629d-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 28 11:15:13 crc kubenswrapper[4923]: I1128 11:15:13.249898 4923 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/6f176857-50d2-41c7-8237-961e330c629d-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Nov 28 11:15:13 crc kubenswrapper[4923]: I1128 11:15:13.502467 4923 generic.go:334] "Generic (PLEG): container finished" podID="6f176857-50d2-41c7-8237-961e330c629d" containerID="b5b5789b67b9671ae9e00dcfa57e90c7fa0e451e97bfde57dbba3c68139d4ec3" exitCode=0 Nov 28 11:15:13 crc kubenswrapper[4923]: I1128 11:15:13.502532 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-g855d" event={"ID":"6f176857-50d2-41c7-8237-961e330c629d","Type":"ContainerDied","Data":"b5b5789b67b9671ae9e00dcfa57e90c7fa0e451e97bfde57dbba3c68139d4ec3"} Nov 28 11:15:13 crc kubenswrapper[4923]: I1128 11:15:13.502560 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-g855d" Nov 28 11:15:13 crc kubenswrapper[4923]: I1128 11:15:13.502634 4923 scope.go:117] "RemoveContainer" containerID="b5b5789b67b9671ae9e00dcfa57e90c7fa0e451e97bfde57dbba3c68139d4ec3" Nov 28 11:15:13 crc kubenswrapper[4923]: I1128 11:15:13.502616 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-g855d" event={"ID":"6f176857-50d2-41c7-8237-961e330c629d","Type":"ContainerDied","Data":"13393fcac0183330276fc39d59fadecad01aff0e6a754171801322d18b7b2b6d"} Nov 28 11:15:13 crc kubenswrapper[4923]: I1128 11:15:13.530005 4923 scope.go:117] "RemoveContainer" containerID="b5b5789b67b9671ae9e00dcfa57e90c7fa0e451e97bfde57dbba3c68139d4ec3" Nov 28 11:15:13 crc kubenswrapper[4923]: E1128 11:15:13.530677 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b5b5789b67b9671ae9e00dcfa57e90c7fa0e451e97bfde57dbba3c68139d4ec3\": container with ID starting with b5b5789b67b9671ae9e00dcfa57e90c7fa0e451e97bfde57dbba3c68139d4ec3 not found: ID does not exist" containerID="b5b5789b67b9671ae9e00dcfa57e90c7fa0e451e97bfde57dbba3c68139d4ec3" Nov 28 11:15:13 crc kubenswrapper[4923]: I1128 11:15:13.530740 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b5b5789b67b9671ae9e00dcfa57e90c7fa0e451e97bfde57dbba3c68139d4ec3"} err="failed to get container status \"b5b5789b67b9671ae9e00dcfa57e90c7fa0e451e97bfde57dbba3c68139d4ec3\": rpc error: code = NotFound desc = could not find container \"b5b5789b67b9671ae9e00dcfa57e90c7fa0e451e97bfde57dbba3c68139d4ec3\": container with ID starting with b5b5789b67b9671ae9e00dcfa57e90c7fa0e451e97bfde57dbba3c68139d4ec3 not found: ID does not exist" Nov 28 11:15:13 crc kubenswrapper[4923]: I1128 11:15:13.563618 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-g855d"] Nov 28 11:15:13 crc kubenswrapper[4923]: I1128 11:15:13.573446 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-g855d"] Nov 28 11:15:14 crc kubenswrapper[4923]: I1128 11:15:14.026491 4923 patch_prober.go:28] interesting pod/machine-config-daemon-bwdth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 11:15:14 crc kubenswrapper[4923]: I1128 11:15:14.026572 4923 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 11:15:15 crc kubenswrapper[4923]: I1128 11:15:15.182005 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6f176857-50d2-41c7-8237-961e330c629d" path="/var/lib/kubelet/pods/6f176857-50d2-41c7-8237-961e330c629d/volumes" Nov 28 11:15:44 crc kubenswrapper[4923]: I1128 11:15:44.025903 4923 patch_prober.go:28] interesting pod/machine-config-daemon-bwdth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 
28 11:15:44 crc kubenswrapper[4923]: I1128 11:15:44.026710 4923 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 11:15:44 crc kubenswrapper[4923]: I1128 11:15:44.026786 4923 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" Nov 28 11:15:44 crc kubenswrapper[4923]: I1128 11:15:44.028892 4923 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"61edbc0d7fbd462cd0df9f2876de70a9446d33b2d98ecb642842c37e988e973d"} pod="openshift-machine-config-operator/machine-config-daemon-bwdth" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 11:15:44 crc kubenswrapper[4923]: I1128 11:15:44.029096 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" containerName="machine-config-daemon" containerID="cri-o://61edbc0d7fbd462cd0df9f2876de70a9446d33b2d98ecb642842c37e988e973d" gracePeriod=600 Nov 28 11:15:44 crc kubenswrapper[4923]: I1128 11:15:44.721742 4923 generic.go:334] "Generic (PLEG): container finished" podID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" containerID="61edbc0d7fbd462cd0df9f2876de70a9446d33b2d98ecb642842c37e988e973d" exitCode=0 Nov 28 11:15:44 crc kubenswrapper[4923]: I1128 11:15:44.721834 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" event={"ID":"092566f7-fc7d-4897-a1f2-4ecedcd3058e","Type":"ContainerDied","Data":"61edbc0d7fbd462cd0df9f2876de70a9446d33b2d98ecb642842c37e988e973d"} Nov 28 11:15:44 crc kubenswrapper[4923]: I1128 11:15:44.722168 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" event={"ID":"092566f7-fc7d-4897-a1f2-4ecedcd3058e","Type":"ContainerStarted","Data":"64a773c89c3c8eb963ad3cd621825622fbc3cbefa6e6e24b0ed07fdb1769cf81"} Nov 28 11:15:44 crc kubenswrapper[4923]: I1128 11:15:44.722196 4923 scope.go:117] "RemoveContainer" containerID="9e0494fbf37786a6c8b1524ab2642c29343c3cfef308a6f0988d59f375d732a9" Nov 28 11:17:41 crc kubenswrapper[4923]: I1128 11:17:41.442837 4923 scope.go:117] "RemoveContainer" containerID="fc1ec96a53b6cdebdc3648bd3698a09c44264b73cd21633fd6c7d6357f9be39f" Nov 28 11:17:44 crc kubenswrapper[4923]: I1128 11:17:44.027066 4923 patch_prober.go:28] interesting pod/machine-config-daemon-bwdth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 11:17:44 crc kubenswrapper[4923]: I1128 11:17:44.027210 4923 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 11:18:14 crc kubenswrapper[4923]: I1128 11:18:14.027023 4923 patch_prober.go:28] 
interesting pod/machine-config-daemon-bwdth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 11:18:14 crc kubenswrapper[4923]: I1128 11:18:14.027730 4923 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 11:18:41 crc kubenswrapper[4923]: I1128 11:18:41.508431 4923 scope.go:117] "RemoveContainer" containerID="40e14a00bb193ffdf61d6605f389e306b9cbfe77a94e8fe5f6e4bb47beb27bae" Nov 28 11:18:41 crc kubenswrapper[4923]: I1128 11:18:41.533251 4923 scope.go:117] "RemoveContainer" containerID="357c8fb2d02d63ba1f049ffe71339e08055730c4c91838dcf695fa385e8077bf" Nov 28 11:18:44 crc kubenswrapper[4923]: I1128 11:18:44.026491 4923 patch_prober.go:28] interesting pod/machine-config-daemon-bwdth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 11:18:44 crc kubenswrapper[4923]: I1128 11:18:44.026925 4923 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 11:18:44 crc kubenswrapper[4923]: I1128 11:18:44.027027 4923 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" Nov 28 11:18:44 crc kubenswrapper[4923]: I1128 11:18:44.028291 4923 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"64a773c89c3c8eb963ad3cd621825622fbc3cbefa6e6e24b0ed07fdb1769cf81"} pod="openshift-machine-config-operator/machine-config-daemon-bwdth" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 11:18:44 crc kubenswrapper[4923]: I1128 11:18:44.028386 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" containerName="machine-config-daemon" containerID="cri-o://64a773c89c3c8eb963ad3cd621825622fbc3cbefa6e6e24b0ed07fdb1769cf81" gracePeriod=600 Nov 28 11:18:44 crc kubenswrapper[4923]: I1128 11:18:44.957481 4923 generic.go:334] "Generic (PLEG): container finished" podID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" containerID="64a773c89c3c8eb963ad3cd621825622fbc3cbefa6e6e24b0ed07fdb1769cf81" exitCode=0 Nov 28 11:18:44 crc kubenswrapper[4923]: I1128 11:18:44.957586 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" event={"ID":"092566f7-fc7d-4897-a1f2-4ecedcd3058e","Type":"ContainerDied","Data":"64a773c89c3c8eb963ad3cd621825622fbc3cbefa6e6e24b0ed07fdb1769cf81"} Nov 28 11:18:44 crc kubenswrapper[4923]: I1128 11:18:44.957849 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-bwdth" event={"ID":"092566f7-fc7d-4897-a1f2-4ecedcd3058e","Type":"ContainerStarted","Data":"677ed572a7b0e83cdbaab7053a3f1b65f579449e7b5bb37190e07948114a0b10"} Nov 28 11:18:44 crc kubenswrapper[4923]: I1128 11:18:44.957889 4923 scope.go:117] "RemoveContainer" containerID="61edbc0d7fbd462cd0df9f2876de70a9446d33b2d98ecb642842c37e988e973d" Nov 28 11:20:32 crc kubenswrapper[4923]: I1128 11:20:32.865751 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-jh94j"] Nov 28 11:20:32 crc kubenswrapper[4923]: E1128 11:20:32.866529 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6f176857-50d2-41c7-8237-961e330c629d" containerName="registry" Nov 28 11:20:32 crc kubenswrapper[4923]: I1128 11:20:32.866544 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="6f176857-50d2-41c7-8237-961e330c629d" containerName="registry" Nov 28 11:20:32 crc kubenswrapper[4923]: E1128 11:20:32.866561 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f35ec69-f377-4418-999d-0089c8362310" containerName="collect-profiles" Nov 28 11:20:32 crc kubenswrapper[4923]: I1128 11:20:32.866569 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f35ec69-f377-4418-999d-0089c8362310" containerName="collect-profiles" Nov 28 11:20:32 crc kubenswrapper[4923]: I1128 11:20:32.866679 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="6f176857-50d2-41c7-8237-961e330c629d" containerName="registry" Nov 28 11:20:32 crc kubenswrapper[4923]: I1128 11:20:32.866693 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="5f35ec69-f377-4418-999d-0089c8362310" containerName="collect-profiles" Nov 28 11:20:32 crc kubenswrapper[4923]: I1128 11:20:32.867098 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-jh94j" Nov 28 11:20:32 crc kubenswrapper[4923]: I1128 11:20:32.868319 4923 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-gzxbf" Nov 28 11:20:32 crc kubenswrapper[4923]: I1128 11:20:32.869716 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-5b446d88c5-n5wdm"] Nov 28 11:20:32 crc kubenswrapper[4923]: I1128 11:20:32.870598 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-n5wdm" Nov 28 11:20:32 crc kubenswrapper[4923]: I1128 11:20:32.870796 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt" Nov 28 11:20:32 crc kubenswrapper[4923]: I1128 11:20:32.871108 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt" Nov 28 11:20:32 crc kubenswrapper[4923]: I1128 11:20:32.872780 4923 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-dt5pm" Nov 28 11:20:32 crc kubenswrapper[4923]: I1128 11:20:32.883884 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-5b446d88c5-n5wdm"] Nov 28 11:20:32 crc kubenswrapper[4923]: I1128 11:20:32.890047 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-8tb85"] Nov 28 11:20:32 crc kubenswrapper[4923]: I1128 11:20:32.890751 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-8tb85" Nov 28 11:20:32 crc kubenswrapper[4923]: I1128 11:20:32.892681 4923 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-b6l69" Nov 28 11:20:32 crc kubenswrapper[4923]: I1128 11:20:32.893130 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-jh94j"] Nov 28 11:20:32 crc kubenswrapper[4923]: I1128 11:20:32.914760 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-8tb85"] Nov 28 11:20:33 crc kubenswrapper[4923]: I1128 11:20:33.003449 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xl95w\" (UniqueName: \"kubernetes.io/projected/b4b4fa5c-384b-4de9-8b8f-de1249a7f3c2-kube-api-access-xl95w\") pod \"cert-manager-5b446d88c5-n5wdm\" (UID: \"b4b4fa5c-384b-4de9-8b8f-de1249a7f3c2\") " pod="cert-manager/cert-manager-5b446d88c5-n5wdm" Nov 28 11:20:33 crc kubenswrapper[4923]: I1128 11:20:33.003495 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5j82k\" (UniqueName: \"kubernetes.io/projected/999e67cd-bc9f-4886-9127-80740f13d57c-kube-api-access-5j82k\") pod \"cert-manager-webhook-5655c58dd6-8tb85\" (UID: \"999e67cd-bc9f-4886-9127-80740f13d57c\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-8tb85" Nov 28 11:20:33 crc kubenswrapper[4923]: I1128 11:20:33.003530 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xllsz\" (UniqueName: \"kubernetes.io/projected/2fa1e707-df09-47b5-bfaa-732a6e50d099-kube-api-access-xllsz\") pod \"cert-manager-cainjector-7f985d654d-jh94j\" (UID: \"2fa1e707-df09-47b5-bfaa-732a6e50d099\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-jh94j" Nov 28 11:20:33 crc kubenswrapper[4923]: I1128 11:20:33.105280 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xl95w\" (UniqueName: \"kubernetes.io/projected/b4b4fa5c-384b-4de9-8b8f-de1249a7f3c2-kube-api-access-xl95w\") pod \"cert-manager-5b446d88c5-n5wdm\" (UID: \"b4b4fa5c-384b-4de9-8b8f-de1249a7f3c2\") " pod="cert-manager/cert-manager-5b446d88c5-n5wdm" Nov 28 11:20:33 crc kubenswrapper[4923]: I1128 11:20:33.105730 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5j82k\" (UniqueName: \"kubernetes.io/projected/999e67cd-bc9f-4886-9127-80740f13d57c-kube-api-access-5j82k\") pod \"cert-manager-webhook-5655c58dd6-8tb85\" (UID: \"999e67cd-bc9f-4886-9127-80740f13d57c\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-8tb85" Nov 28 11:20:33 crc kubenswrapper[4923]: I1128 11:20:33.105817 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xllsz\" (UniqueName: \"kubernetes.io/projected/2fa1e707-df09-47b5-bfaa-732a6e50d099-kube-api-access-xllsz\") pod \"cert-manager-cainjector-7f985d654d-jh94j\" (UID: \"2fa1e707-df09-47b5-bfaa-732a6e50d099\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-jh94j" Nov 28 11:20:33 crc kubenswrapper[4923]: I1128 11:20:33.123621 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xl95w\" (UniqueName: \"kubernetes.io/projected/b4b4fa5c-384b-4de9-8b8f-de1249a7f3c2-kube-api-access-xl95w\") pod \"cert-manager-5b446d88c5-n5wdm\" (UID: \"b4b4fa5c-384b-4de9-8b8f-de1249a7f3c2\") " 
pod="cert-manager/cert-manager-5b446d88c5-n5wdm" Nov 28 11:20:33 crc kubenswrapper[4923]: I1128 11:20:33.125608 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5j82k\" (UniqueName: \"kubernetes.io/projected/999e67cd-bc9f-4886-9127-80740f13d57c-kube-api-access-5j82k\") pod \"cert-manager-webhook-5655c58dd6-8tb85\" (UID: \"999e67cd-bc9f-4886-9127-80740f13d57c\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-8tb85" Nov 28 11:20:33 crc kubenswrapper[4923]: I1128 11:20:33.132887 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xllsz\" (UniqueName: \"kubernetes.io/projected/2fa1e707-df09-47b5-bfaa-732a6e50d099-kube-api-access-xllsz\") pod \"cert-manager-cainjector-7f985d654d-jh94j\" (UID: \"2fa1e707-df09-47b5-bfaa-732a6e50d099\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-jh94j" Nov 28 11:20:33 crc kubenswrapper[4923]: I1128 11:20:33.183652 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-jh94j" Nov 28 11:20:33 crc kubenswrapper[4923]: I1128 11:20:33.190903 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-n5wdm" Nov 28 11:20:33 crc kubenswrapper[4923]: I1128 11:20:33.201633 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-8tb85" Nov 28 11:20:33 crc kubenswrapper[4923]: I1128 11:20:33.465891 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-8tb85"] Nov 28 11:20:33 crc kubenswrapper[4923]: I1128 11:20:33.471908 4923 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 11:20:33 crc kubenswrapper[4923]: I1128 11:20:33.579007 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-jh94j"] Nov 28 11:20:33 crc kubenswrapper[4923]: W1128 11:20:33.653060 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb4b4fa5c_384b_4de9_8b8f_de1249a7f3c2.slice/crio-6f04ec6588c15504d7d744043ab3b02caaa883cc2f6a351805778158d263a4dc WatchSource:0}: Error finding container 6f04ec6588c15504d7d744043ab3b02caaa883cc2f6a351805778158d263a4dc: Status 404 returned error can't find the container with id 6f04ec6588c15504d7d744043ab3b02caaa883cc2f6a351805778158d263a4dc Nov 28 11:20:33 crc kubenswrapper[4923]: I1128 11:20:33.660228 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-5b446d88c5-n5wdm"] Nov 28 11:20:33 crc kubenswrapper[4923]: I1128 11:20:33.709009 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-n5wdm" event={"ID":"b4b4fa5c-384b-4de9-8b8f-de1249a7f3c2","Type":"ContainerStarted","Data":"6f04ec6588c15504d7d744043ab3b02caaa883cc2f6a351805778158d263a4dc"} Nov 28 11:20:33 crc kubenswrapper[4923]: I1128 11:20:33.710296 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-jh94j" event={"ID":"2fa1e707-df09-47b5-bfaa-732a6e50d099","Type":"ContainerStarted","Data":"c670d7b7babbd4741f724bc1550d5ae3bb65a280ea6b40710dbf25b94cffda7c"} Nov 28 11:20:33 crc kubenswrapper[4923]: I1128 11:20:33.711224 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-5655c58dd6-8tb85" 
event={"ID":"999e67cd-bc9f-4886-9127-80740f13d57c","Type":"ContainerStarted","Data":"dca9a882a6cf0d78c8c064400ca4f6689faa6f1f450a5892b572f0971fd924d7"} Nov 28 11:20:38 crc kubenswrapper[4923]: I1128 11:20:38.743901 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-5655c58dd6-8tb85" event={"ID":"999e67cd-bc9f-4886-9127-80740f13d57c","Type":"ContainerStarted","Data":"2764a3fc0d499caf7cfb0a05e866c927b0478295a1d958cab051fea5a7306e12"} Nov 28 11:20:38 crc kubenswrapper[4923]: I1128 11:20:38.744664 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="cert-manager/cert-manager-webhook-5655c58dd6-8tb85" Nov 28 11:20:38 crc kubenswrapper[4923]: I1128 11:20:38.747764 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-n5wdm" event={"ID":"b4b4fa5c-384b-4de9-8b8f-de1249a7f3c2","Type":"ContainerStarted","Data":"77ceda13ac894afb9991994b2393238f58ccaf7afb0025148a2a025a0d685c86"} Nov 28 11:20:38 crc kubenswrapper[4923]: I1128 11:20:38.750319 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-jh94j" event={"ID":"2fa1e707-df09-47b5-bfaa-732a6e50d099","Type":"ContainerStarted","Data":"a504d7f947d3970ce0a498d78500869c65a1e1ebdfab546669bc3ec665d54536"} Nov 28 11:20:38 crc kubenswrapper[4923]: I1128 11:20:38.767417 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-webhook-5655c58dd6-8tb85" podStartSLOduration=2.5613549410000003 podStartE2EDuration="6.767401933s" podCreationTimestamp="2025-11-28 11:20:32 +0000 UTC" firstStartedPulling="2025-11-28 11:20:33.471719706 +0000 UTC m=+712.600403916" lastFinishedPulling="2025-11-28 11:20:37.677766698 +0000 UTC m=+716.806450908" observedRunningTime="2025-11-28 11:20:38.763860313 +0000 UTC m=+717.892544523" watchObservedRunningTime="2025-11-28 11:20:38.767401933 +0000 UTC m=+717.896086143" Nov 28 11:20:38 crc kubenswrapper[4923]: I1128 11:20:38.789522 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-5b446d88c5-n5wdm" podStartSLOduration=2.773231712 podStartE2EDuration="6.789500015s" podCreationTimestamp="2025-11-28 11:20:32 +0000 UTC" firstStartedPulling="2025-11-28 11:20:33.654452696 +0000 UTC m=+712.783136906" lastFinishedPulling="2025-11-28 11:20:37.670720999 +0000 UTC m=+716.799405209" observedRunningTime="2025-11-28 11:20:38.786750467 +0000 UTC m=+717.915434677" watchObservedRunningTime="2025-11-28 11:20:38.789500015 +0000 UTC m=+717.918184255" Nov 28 11:20:38 crc kubenswrapper[4923]: I1128 11:20:38.813560 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-cainjector-7f985d654d-jh94j" podStartSLOduration=2.6466954019999998 podStartE2EDuration="6.813542071s" podCreationTimestamp="2025-11-28 11:20:32 +0000 UTC" firstStartedPulling="2025-11-28 11:20:33.581262197 +0000 UTC m=+712.709946407" lastFinishedPulling="2025-11-28 11:20:37.748108856 +0000 UTC m=+716.876793076" observedRunningTime="2025-11-28 11:20:38.811077821 +0000 UTC m=+717.939762061" watchObservedRunningTime="2025-11-28 11:20:38.813542071 +0000 UTC m=+717.942226281" Nov 28 11:20:43 crc kubenswrapper[4923]: I1128 11:20:43.206008 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="cert-manager/cert-manager-webhook-5655c58dd6-8tb85" Nov 28 11:20:43 crc kubenswrapper[4923]: I1128 11:20:43.669734 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openshift-ovn-kubernetes/ovnkube-node-68dth"] Nov 28 11:20:43 crc kubenswrapper[4923]: I1128 11:20:43.670640 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" podUID="08e03349-56fc-4b2d-93d3-cf2405a4b7ad" containerName="ovn-controller" containerID="cri-o://4bc7c6e0b076f04ba7810c82578147a9a3af59d3393e8effb111c299583aa6de" gracePeriod=30 Nov 28 11:20:43 crc kubenswrapper[4923]: I1128 11:20:43.670741 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" podUID="08e03349-56fc-4b2d-93d3-cf2405a4b7ad" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://b3c01dc5b138b3d245898dd4a01c5e81350afe6fabfe9e0333589cd9439d4017" gracePeriod=30 Nov 28 11:20:43 crc kubenswrapper[4923]: I1128 11:20:43.670743 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" podUID="08e03349-56fc-4b2d-93d3-cf2405a4b7ad" containerName="nbdb" containerID="cri-o://88bb4ac52c4706ca3d80080efb31eff071b89651d1a474b4c0c11ed5559ee7a4" gracePeriod=30 Nov 28 11:20:43 crc kubenswrapper[4923]: I1128 11:20:43.670814 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" podUID="08e03349-56fc-4b2d-93d3-cf2405a4b7ad" containerName="northd" containerID="cri-o://b7b206747c810fe48a3d4269cdf80dce693f2d075510aabb42ef2c6dbbea97e7" gracePeriod=30 Nov 28 11:20:43 crc kubenswrapper[4923]: I1128 11:20:43.670825 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" podUID="08e03349-56fc-4b2d-93d3-cf2405a4b7ad" containerName="sbdb" containerID="cri-o://b7c2e3f2c83ec1b586a9478fb8d23caccab36a0fe08a3f0907a7b0cb2e67af65" gracePeriod=30 Nov 28 11:20:43 crc kubenswrapper[4923]: I1128 11:20:43.670842 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" podUID="08e03349-56fc-4b2d-93d3-cf2405a4b7ad" containerName="ovn-acl-logging" containerID="cri-o://a7489bfb225a27d96b70124820fb1924580c08b3355ef948335f881d7646a8a3" gracePeriod=30 Nov 28 11:20:43 crc kubenswrapper[4923]: I1128 11:20:43.670836 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" podUID="08e03349-56fc-4b2d-93d3-cf2405a4b7ad" containerName="kube-rbac-proxy-node" containerID="cri-o://9ee3c047cb59b98c8394618e6194fc477b983a7039581951378c69698b307ee7" gracePeriod=30 Nov 28 11:20:43 crc kubenswrapper[4923]: I1128 11:20:43.728492 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" podUID="08e03349-56fc-4b2d-93d3-cf2405a4b7ad" containerName="ovnkube-controller" containerID="cri-o://4173e729eed9162f6bf7b08d2ce1e2432fe973fa48a87354e9ae9d0057caf297" gracePeriod=30 Nov 28 11:20:43 crc kubenswrapper[4923]: I1128 11:20:43.786920 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-h5s2m_84374038-67ce-4dc0-a2c2-6eed9650c604/kube-multus/2.log" Nov 28 11:20:43 crc kubenswrapper[4923]: I1128 11:20:43.789130 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-h5s2m_84374038-67ce-4dc0-a2c2-6eed9650c604/kube-multus/1.log" Nov 28 11:20:43 crc kubenswrapper[4923]: I1128 11:20:43.789173 4923 generic.go:334] "Generic (PLEG): container finished" 
podID="84374038-67ce-4dc0-a2c2-6eed9650c604" containerID="4e5d464fbc192436a17d1b829b59f434eeda1bcd59ca123e60356e99ed41be9a" exitCode=2 Nov 28 11:20:43 crc kubenswrapper[4923]: I1128 11:20:43.789202 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-h5s2m" event={"ID":"84374038-67ce-4dc0-a2c2-6eed9650c604","Type":"ContainerDied","Data":"4e5d464fbc192436a17d1b829b59f434eeda1bcd59ca123e60356e99ed41be9a"} Nov 28 11:20:43 crc kubenswrapper[4923]: I1128 11:20:43.789236 4923 scope.go:117] "RemoveContainer" containerID="53821c93696c6770adcfbe02308f05bdb9635578bd1dfa8d3201ecf94fa8b37c" Nov 28 11:20:43 crc kubenswrapper[4923]: I1128 11:20:43.789724 4923 scope.go:117] "RemoveContainer" containerID="4e5d464fbc192436a17d1b829b59f434eeda1bcd59ca123e60356e99ed41be9a" Nov 28 11:20:43 crc kubenswrapper[4923]: E1128 11:20:43.789885 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-h5s2m_openshift-multus(84374038-67ce-4dc0-a2c2-6eed9650c604)\"" pod="openshift-multus/multus-h5s2m" podUID="84374038-67ce-4dc0-a2c2-6eed9650c604" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.026072 4923 patch_prober.go:28] interesting pod/machine-config-daemon-bwdth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.026132 4923 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.186718 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-68dth_08e03349-56fc-4b2d-93d3-cf2405a4b7ad/ovnkube-controller/3.log" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.190173 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-68dth_08e03349-56fc-4b2d-93d3-cf2405a4b7ad/ovn-acl-logging/0.log" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.190859 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-68dth_08e03349-56fc-4b2d-93d3-cf2405a4b7ad/ovn-controller/0.log" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.194224 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.287579 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-z697t"] Nov 28 11:20:44 crc kubenswrapper[4923]: E1128 11:20:44.288527 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="08e03349-56fc-4b2d-93d3-cf2405a4b7ad" containerName="kube-rbac-proxy-node" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.288595 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="08e03349-56fc-4b2d-93d3-cf2405a4b7ad" containerName="kube-rbac-proxy-node" Nov 28 11:20:44 crc kubenswrapper[4923]: E1128 11:20:44.288649 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="08e03349-56fc-4b2d-93d3-cf2405a4b7ad" containerName="kube-rbac-proxy-ovn-metrics" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.288697 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="08e03349-56fc-4b2d-93d3-cf2405a4b7ad" containerName="kube-rbac-proxy-ovn-metrics" Nov 28 11:20:44 crc kubenswrapper[4923]: E1128 11:20:44.288743 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="08e03349-56fc-4b2d-93d3-cf2405a4b7ad" containerName="ovnkube-controller" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.288788 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="08e03349-56fc-4b2d-93d3-cf2405a4b7ad" containerName="ovnkube-controller" Nov 28 11:20:44 crc kubenswrapper[4923]: E1128 11:20:44.288835 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="08e03349-56fc-4b2d-93d3-cf2405a4b7ad" containerName="ovnkube-controller" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.288881 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="08e03349-56fc-4b2d-93d3-cf2405a4b7ad" containerName="ovnkube-controller" Nov 28 11:20:44 crc kubenswrapper[4923]: E1128 11:20:44.288945 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="08e03349-56fc-4b2d-93d3-cf2405a4b7ad" containerName="ovnkube-controller" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.288992 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="08e03349-56fc-4b2d-93d3-cf2405a4b7ad" containerName="ovnkube-controller" Nov 28 11:20:44 crc kubenswrapper[4923]: E1128 11:20:44.289042 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="08e03349-56fc-4b2d-93d3-cf2405a4b7ad" containerName="kubecfg-setup" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.289090 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="08e03349-56fc-4b2d-93d3-cf2405a4b7ad" containerName="kubecfg-setup" Nov 28 11:20:44 crc kubenswrapper[4923]: E1128 11:20:44.289135 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="08e03349-56fc-4b2d-93d3-cf2405a4b7ad" containerName="ovn-acl-logging" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.289181 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="08e03349-56fc-4b2d-93d3-cf2405a4b7ad" containerName="ovn-acl-logging" Nov 28 11:20:44 crc kubenswrapper[4923]: E1128 11:20:44.289234 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="08e03349-56fc-4b2d-93d3-cf2405a4b7ad" containerName="northd" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.289277 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="08e03349-56fc-4b2d-93d3-cf2405a4b7ad" containerName="northd" Nov 28 11:20:44 crc kubenswrapper[4923]: E1128 11:20:44.289327 4923 cpu_manager.go:410] "RemoveStaleState: 
removing container" podUID="08e03349-56fc-4b2d-93d3-cf2405a4b7ad" containerName="ovn-controller" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.289372 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="08e03349-56fc-4b2d-93d3-cf2405a4b7ad" containerName="ovn-controller" Nov 28 11:20:44 crc kubenswrapper[4923]: E1128 11:20:44.289418 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="08e03349-56fc-4b2d-93d3-cf2405a4b7ad" containerName="nbdb" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.289463 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="08e03349-56fc-4b2d-93d3-cf2405a4b7ad" containerName="nbdb" Nov 28 11:20:44 crc kubenswrapper[4923]: E1128 11:20:44.289516 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="08e03349-56fc-4b2d-93d3-cf2405a4b7ad" containerName="ovnkube-controller" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.289559 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="08e03349-56fc-4b2d-93d3-cf2405a4b7ad" containerName="ovnkube-controller" Nov 28 11:20:44 crc kubenswrapper[4923]: E1128 11:20:44.289604 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="08e03349-56fc-4b2d-93d3-cf2405a4b7ad" containerName="sbdb" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.289650 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="08e03349-56fc-4b2d-93d3-cf2405a4b7ad" containerName="sbdb" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.289776 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="08e03349-56fc-4b2d-93d3-cf2405a4b7ad" containerName="sbdb" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.289829 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="08e03349-56fc-4b2d-93d3-cf2405a4b7ad" containerName="ovnkube-controller" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.289883 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="08e03349-56fc-4b2d-93d3-cf2405a4b7ad" containerName="ovnkube-controller" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.289957 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="08e03349-56fc-4b2d-93d3-cf2405a4b7ad" containerName="ovn-controller" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.290014 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="08e03349-56fc-4b2d-93d3-cf2405a4b7ad" containerName="northd" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.290062 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="08e03349-56fc-4b2d-93d3-cf2405a4b7ad" containerName="ovnkube-controller" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.290108 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="08e03349-56fc-4b2d-93d3-cf2405a4b7ad" containerName="kube-rbac-proxy-node" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.290169 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="08e03349-56fc-4b2d-93d3-cf2405a4b7ad" containerName="nbdb" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.290222 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="08e03349-56fc-4b2d-93d3-cf2405a4b7ad" containerName="ovn-acl-logging" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.290271 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="08e03349-56fc-4b2d-93d3-cf2405a4b7ad" containerName="kube-rbac-proxy-ovn-metrics" Nov 28 11:20:44 crc kubenswrapper[4923]: E1128 11:20:44.290408 4923 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="08e03349-56fc-4b2d-93d3-cf2405a4b7ad" containerName="ovnkube-controller" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.290460 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="08e03349-56fc-4b2d-93d3-cf2405a4b7ad" containerName="ovnkube-controller" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.290581 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="08e03349-56fc-4b2d-93d3-cf2405a4b7ad" containerName="ovnkube-controller" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.290782 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="08e03349-56fc-4b2d-93d3-cf2405a4b7ad" containerName="ovnkube-controller" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.292095 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-z697t" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.383125 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-env-overrides\") pod \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\" (UID: \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\") " Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.383332 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-host-run-netns\") pod \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\" (UID: \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\") " Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.383418 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "08e03349-56fc-4b2d-93d3-cf2405a4b7ad" (UID: "08e03349-56fc-4b2d-93d3-cf2405a4b7ad"). InnerVolumeSpecName "host-run-netns". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.383538 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-host-cni-bin\") pod \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\" (UID: \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\") " Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.383566 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-run-openvswitch\") pod \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\" (UID: \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\") " Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.383575 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "08e03349-56fc-4b2d-93d3-cf2405a4b7ad" (UID: "08e03349-56fc-4b2d-93d3-cf2405a4b7ad"). InnerVolumeSpecName "env-overrides". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.383591 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-etc-openvswitch\") pod \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\" (UID: \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\") " Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.383609 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-log-socket\") pod \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\" (UID: \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\") " Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.383619 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "08e03349-56fc-4b2d-93d3-cf2405a4b7ad" (UID: "08e03349-56fc-4b2d-93d3-cf2405a4b7ad"). InnerVolumeSpecName "host-cni-bin". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.383642 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-var-lib-openvswitch\") pod \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\" (UID: \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\") " Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.383649 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "08e03349-56fc-4b2d-93d3-cf2405a4b7ad" (UID: "08e03349-56fc-4b2d-93d3-cf2405a4b7ad"). InnerVolumeSpecName "etc-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.383658 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-systemd-units\") pod \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\" (UID: \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\") " Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.383676 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-node-log\") pod \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\" (UID: \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\") " Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.383650 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "08e03349-56fc-4b2d-93d3-cf2405a4b7ad" (UID: "08e03349-56fc-4b2d-93d3-cf2405a4b7ad"). InnerVolumeSpecName "run-openvswitch". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.383696 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-host-cni-netd\") pod \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\" (UID: \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\") " Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.383678 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-log-socket" (OuterVolumeSpecName: "log-socket") pod "08e03349-56fc-4b2d-93d3-cf2405a4b7ad" (UID: "08e03349-56fc-4b2d-93d3-cf2405a4b7ad"). InnerVolumeSpecName "log-socket". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.383728 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "08e03349-56fc-4b2d-93d3-cf2405a4b7ad" (UID: "08e03349-56fc-4b2d-93d3-cf2405a4b7ad"). InnerVolumeSpecName "host-cni-netd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.383679 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "08e03349-56fc-4b2d-93d3-cf2405a4b7ad" (UID: "08e03349-56fc-4b2d-93d3-cf2405a4b7ad"). InnerVolumeSpecName "var-lib-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.383705 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "08e03349-56fc-4b2d-93d3-cf2405a4b7ad" (UID: "08e03349-56fc-4b2d-93d3-cf2405a4b7ad"). InnerVolumeSpecName "systemd-units". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.383716 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-node-log" (OuterVolumeSpecName: "node-log") pod "08e03349-56fc-4b2d-93d3-cf2405a4b7ad" (UID: "08e03349-56fc-4b2d-93d3-cf2405a4b7ad"). InnerVolumeSpecName "node-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.383727 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-host-var-lib-cni-networks-ovn-kubernetes\") pod \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\" (UID: \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\") " Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.383757 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "08e03349-56fc-4b2d-93d3-cf2405a4b7ad" (UID: "08e03349-56fc-4b2d-93d3-cf2405a4b7ad"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.383809 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-ovn-node-metrics-cert\") pod \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\" (UID: \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\") " Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.383837 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-host-slash\") pod \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\" (UID: \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\") " Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.383860 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-run-systemd\") pod \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\" (UID: \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\") " Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.383882 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-host-kubelet\") pod \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\" (UID: \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\") " Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.383921 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-ovnkube-config\") pod \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\" (UID: \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\") " Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.383970 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-ovnkube-script-lib\") pod \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\" (UID: \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\") " Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.383993 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qd9rd\" (UniqueName: \"kubernetes.io/projected/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-kube-api-access-qd9rd\") pod \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\" (UID: \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\") " Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.384016 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-run-ovn\") pod \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\" (UID: \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\") " Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.384040 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-host-run-ovn-kubernetes\") pod \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\" (UID: \"08e03349-56fc-4b2d-93d3-cf2405a4b7ad\") " Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.384176 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod 
"08e03349-56fc-4b2d-93d3-cf2405a4b7ad" (UID: "08e03349-56fc-4b2d-93d3-cf2405a4b7ad"). InnerVolumeSpecName "host-run-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.384440 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "08e03349-56fc-4b2d-93d3-cf2405a4b7ad" (UID: "08e03349-56fc-4b2d-93d3-cf2405a4b7ad"). InnerVolumeSpecName "host-kubelet". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.384505 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-host-slash" (OuterVolumeSpecName: "host-slash") pod "08e03349-56fc-4b2d-93d3-cf2405a4b7ad" (UID: "08e03349-56fc-4b2d-93d3-cf2405a4b7ad"). InnerVolumeSpecName "host-slash". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.384803 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "08e03349-56fc-4b2d-93d3-cf2405a4b7ad" (UID: "08e03349-56fc-4b2d-93d3-cf2405a4b7ad"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.384906 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "08e03349-56fc-4b2d-93d3-cf2405a4b7ad" (UID: "08e03349-56fc-4b2d-93d3-cf2405a4b7ad"). InnerVolumeSpecName "run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.385213 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "08e03349-56fc-4b2d-93d3-cf2405a4b7ad" (UID: "08e03349-56fc-4b2d-93d3-cf2405a4b7ad"). InnerVolumeSpecName "ovnkube-script-lib". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.385448 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd-host-slash\") pod \"ovnkube-node-z697t\" (UID: \"db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd\") " pod="openshift-ovn-kubernetes/ovnkube-node-z697t" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.385523 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd-host-run-ovn-kubernetes\") pod \"ovnkube-node-z697t\" (UID: \"db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd\") " pod="openshift-ovn-kubernetes/ovnkube-node-z697t" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.385632 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nlqdr\" (UniqueName: \"kubernetes.io/projected/db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd-kube-api-access-nlqdr\") pod \"ovnkube-node-z697t\" (UID: \"db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd\") " pod="openshift-ovn-kubernetes/ovnkube-node-z697t" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.385915 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd-host-kubelet\") pod \"ovnkube-node-z697t\" (UID: \"db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd\") " pod="openshift-ovn-kubernetes/ovnkube-node-z697t" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.385976 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd-host-cni-netd\") pod \"ovnkube-node-z697t\" (UID: \"db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd\") " pod="openshift-ovn-kubernetes/ovnkube-node-z697t" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.386006 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd-var-lib-openvswitch\") pod \"ovnkube-node-z697t\" (UID: \"db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd\") " pod="openshift-ovn-kubernetes/ovnkube-node-z697t" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.386028 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd-host-cni-bin\") pod \"ovnkube-node-z697t\" (UID: \"db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd\") " pod="openshift-ovn-kubernetes/ovnkube-node-z697t" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.386057 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd-run-openvswitch\") pod \"ovnkube-node-z697t\" (UID: \"db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd\") " pod="openshift-ovn-kubernetes/ovnkube-node-z697t" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.386089 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: 
\"kubernetes.io/configmap/db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd-ovnkube-script-lib\") pod \"ovnkube-node-z697t\" (UID: \"db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd\") " pod="openshift-ovn-kubernetes/ovnkube-node-z697t" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.386117 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd-etc-openvswitch\") pod \"ovnkube-node-z697t\" (UID: \"db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd\") " pod="openshift-ovn-kubernetes/ovnkube-node-z697t" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.386150 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd-log-socket\") pod \"ovnkube-node-z697t\" (UID: \"db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd\") " pod="openshift-ovn-kubernetes/ovnkube-node-z697t" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.386189 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-z697t\" (UID: \"db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd\") " pod="openshift-ovn-kubernetes/ovnkube-node-z697t" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.386210 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd-env-overrides\") pod \"ovnkube-node-z697t\" (UID: \"db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd\") " pod="openshift-ovn-kubernetes/ovnkube-node-z697t" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.386249 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd-ovnkube-config\") pod \"ovnkube-node-z697t\" (UID: \"db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd\") " pod="openshift-ovn-kubernetes/ovnkube-node-z697t" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.386271 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd-systemd-units\") pod \"ovnkube-node-z697t\" (UID: \"db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd\") " pod="openshift-ovn-kubernetes/ovnkube-node-z697t" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.386294 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd-ovn-node-metrics-cert\") pod \"ovnkube-node-z697t\" (UID: \"db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd\") " pod="openshift-ovn-kubernetes/ovnkube-node-z697t" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.386319 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd-node-log\") pod \"ovnkube-node-z697t\" (UID: \"db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd\") " pod="openshift-ovn-kubernetes/ovnkube-node-z697t" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.386353 4923 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd-run-systemd\") pod \"ovnkube-node-z697t\" (UID: \"db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd\") " pod="openshift-ovn-kubernetes/ovnkube-node-z697t" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.386377 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd-run-ovn\") pod \"ovnkube-node-z697t\" (UID: \"db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd\") " pod="openshift-ovn-kubernetes/ovnkube-node-z697t" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.386398 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd-host-run-netns\") pod \"ovnkube-node-z697t\" (UID: \"db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd\") " pod="openshift-ovn-kubernetes/ovnkube-node-z697t" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.386467 4923 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.386481 4923 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.386495 4923 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.386508 4923 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.386521 4923 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-host-run-netns\") on node \"crc\" DevicePath \"\"" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.386532 4923 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-host-cni-bin\") on node \"crc\" DevicePath \"\"" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.386543 4923 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-run-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.386555 4923 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-etc-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.386565 4923 reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-log-socket\") on node \"crc\" DevicePath \"\"" Nov 28 11:20:44 crc 
kubenswrapper[4923]: I1128 11:20:44.386577 4923 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-var-lib-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.386589 4923 reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-systemd-units\") on node \"crc\" DevicePath \"\"" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.386600 4923 reconciler_common.go:293] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-node-log\") on node \"crc\" DevicePath \"\"" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.386611 4923 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-host-cni-netd\") on node \"crc\" DevicePath \"\"" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.386622 4923 reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.386634 4923 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-host-slash\") on node \"crc\" DevicePath \"\"" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.386645 4923 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-host-kubelet\") on node \"crc\" DevicePath \"\"" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.386657 4923 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.389746 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "08e03349-56fc-4b2d-93d3-cf2405a4b7ad" (UID: "08e03349-56fc-4b2d-93d3-cf2405a4b7ad"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.391368 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-kube-api-access-qd9rd" (OuterVolumeSpecName: "kube-api-access-qd9rd") pod "08e03349-56fc-4b2d-93d3-cf2405a4b7ad" (UID: "08e03349-56fc-4b2d-93d3-cf2405a4b7ad"). InnerVolumeSpecName "kube-api-access-qd9rd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.400849 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "08e03349-56fc-4b2d-93d3-cf2405a4b7ad" (UID: "08e03349-56fc-4b2d-93d3-cf2405a4b7ad"). InnerVolumeSpecName "run-systemd". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.487389 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd-systemd-units\") pod \"ovnkube-node-z697t\" (UID: \"db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd\") " pod="openshift-ovn-kubernetes/ovnkube-node-z697t" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.487496 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd-systemd-units\") pod \"ovnkube-node-z697t\" (UID: \"db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd\") " pod="openshift-ovn-kubernetes/ovnkube-node-z697t" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.488009 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd-ovn-node-metrics-cert\") pod \"ovnkube-node-z697t\" (UID: \"db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd\") " pod="openshift-ovn-kubernetes/ovnkube-node-z697t" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.488204 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd-node-log\") pod \"ovnkube-node-z697t\" (UID: \"db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd\") " pod="openshift-ovn-kubernetes/ovnkube-node-z697t" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.488440 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd-node-log\") pod \"ovnkube-node-z697t\" (UID: \"db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd\") " pod="openshift-ovn-kubernetes/ovnkube-node-z697t" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.488747 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd-run-systemd\") pod \"ovnkube-node-z697t\" (UID: \"db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd\") " pod="openshift-ovn-kubernetes/ovnkube-node-z697t" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.488909 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd-run-ovn\") pod \"ovnkube-node-z697t\" (UID: \"db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd\") " pod="openshift-ovn-kubernetes/ovnkube-node-z697t" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.488753 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd-run-systemd\") pod \"ovnkube-node-z697t\" (UID: \"db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd\") " pod="openshift-ovn-kubernetes/ovnkube-node-z697t" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.489066 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd-run-ovn\") pod \"ovnkube-node-z697t\" (UID: \"db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd\") " pod="openshift-ovn-kubernetes/ovnkube-node-z697t" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.489342 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd-host-run-netns\") pod \"ovnkube-node-z697t\" (UID: \"db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd\") " pod="openshift-ovn-kubernetes/ovnkube-node-z697t" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.489150 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd-host-run-netns\") pod \"ovnkube-node-z697t\" (UID: \"db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd\") " pod="openshift-ovn-kubernetes/ovnkube-node-z697t" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.489785 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd-host-slash\") pod \"ovnkube-node-z697t\" (UID: \"db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd\") " pod="openshift-ovn-kubernetes/ovnkube-node-z697t" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.489986 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd-host-slash\") pod \"ovnkube-node-z697t\" (UID: \"db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd\") " pod="openshift-ovn-kubernetes/ovnkube-node-z697t" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.490176 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd-host-run-ovn-kubernetes\") pod \"ovnkube-node-z697t\" (UID: \"db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd\") " pod="openshift-ovn-kubernetes/ovnkube-node-z697t" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.490333 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd-host-run-ovn-kubernetes\") pod \"ovnkube-node-z697t\" (UID: \"db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd\") " pod="openshift-ovn-kubernetes/ovnkube-node-z697t" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.490508 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nlqdr\" (UniqueName: \"kubernetes.io/projected/db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd-kube-api-access-nlqdr\") pod \"ovnkube-node-z697t\" (UID: \"db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd\") " pod="openshift-ovn-kubernetes/ovnkube-node-z697t" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.491167 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd-host-kubelet\") pod \"ovnkube-node-z697t\" (UID: \"db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd\") " pod="openshift-ovn-kubernetes/ovnkube-node-z697t" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.491235 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd-host-cni-netd\") pod \"ovnkube-node-z697t\" (UID: \"db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd\") " pod="openshift-ovn-kubernetes/ovnkube-node-z697t" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.491266 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: 
\"kubernetes.io/host-path/db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd-var-lib-openvswitch\") pod \"ovnkube-node-z697t\" (UID: \"db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd\") " pod="openshift-ovn-kubernetes/ovnkube-node-z697t" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.491381 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd-host-cni-netd\") pod \"ovnkube-node-z697t\" (UID: \"db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd\") " pod="openshift-ovn-kubernetes/ovnkube-node-z697t" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.491487 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd-var-lib-openvswitch\") pod \"ovnkube-node-z697t\" (UID: \"db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd\") " pod="openshift-ovn-kubernetes/ovnkube-node-z697t" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.491668 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd-host-cni-bin\") pod \"ovnkube-node-z697t\" (UID: \"db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd\") " pod="openshift-ovn-kubernetes/ovnkube-node-z697t" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.491747 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd-run-openvswitch\") pod \"ovnkube-node-z697t\" (UID: \"db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd\") " pod="openshift-ovn-kubernetes/ovnkube-node-z697t" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.491786 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd-run-openvswitch\") pod \"ovnkube-node-z697t\" (UID: \"db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd\") " pod="openshift-ovn-kubernetes/ovnkube-node-z697t" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.491666 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd-host-kubelet\") pod \"ovnkube-node-z697t\" (UID: \"db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd\") " pod="openshift-ovn-kubernetes/ovnkube-node-z697t" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.491815 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd-ovnkube-script-lib\") pod \"ovnkube-node-z697t\" (UID: \"db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd\") " pod="openshift-ovn-kubernetes/ovnkube-node-z697t" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.491897 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd-etc-openvswitch\") pod \"ovnkube-node-z697t\" (UID: \"db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd\") " pod="openshift-ovn-kubernetes/ovnkube-node-z697t" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.491972 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd-log-socket\") pod \"ovnkube-node-z697t\" (UID: 
\"db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd\") " pod="openshift-ovn-kubernetes/ovnkube-node-z697t" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.492019 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd-log-socket\") pod \"ovnkube-node-z697t\" (UID: \"db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd\") " pod="openshift-ovn-kubernetes/ovnkube-node-z697t" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.492024 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd-etc-openvswitch\") pod \"ovnkube-node-z697t\" (UID: \"db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd\") " pod="openshift-ovn-kubernetes/ovnkube-node-z697t" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.492043 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-z697t\" (UID: \"db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd\") " pod="openshift-ovn-kubernetes/ovnkube-node-z697t" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.492085 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-z697t\" (UID: \"db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd\") " pod="openshift-ovn-kubernetes/ovnkube-node-z697t" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.492095 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd-env-overrides\") pod \"ovnkube-node-z697t\" (UID: \"db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd\") " pod="openshift-ovn-kubernetes/ovnkube-node-z697t" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.492170 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd-ovnkube-config\") pod \"ovnkube-node-z697t\" (UID: \"db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd\") " pod="openshift-ovn-kubernetes/ovnkube-node-z697t" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.492250 4923 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.492274 4923 reconciler_common.go:293] "Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-run-systemd\") on node \"crc\" DevicePath \"\"" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.492319 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qd9rd\" (UniqueName: \"kubernetes.io/projected/08e03349-56fc-4b2d-93d3-cf2405a4b7ad-kube-api-access-qd9rd\") on node \"crc\" DevicePath \"\"" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.492620 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd-ovnkube-script-lib\") pod 
\"ovnkube-node-z697t\" (UID: \"db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd\") " pod="openshift-ovn-kubernetes/ovnkube-node-z697t" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.492850 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd-env-overrides\") pod \"ovnkube-node-z697t\" (UID: \"db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd\") " pod="openshift-ovn-kubernetes/ovnkube-node-z697t" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.493024 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd-ovnkube-config\") pod \"ovnkube-node-z697t\" (UID: \"db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd\") " pod="openshift-ovn-kubernetes/ovnkube-node-z697t" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.493713 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd-ovn-node-metrics-cert\") pod \"ovnkube-node-z697t\" (UID: \"db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd\") " pod="openshift-ovn-kubernetes/ovnkube-node-z697t" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.493973 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd-host-cni-bin\") pod \"ovnkube-node-z697t\" (UID: \"db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd\") " pod="openshift-ovn-kubernetes/ovnkube-node-z697t" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.520016 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nlqdr\" (UniqueName: \"kubernetes.io/projected/db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd-kube-api-access-nlqdr\") pod \"ovnkube-node-z697t\" (UID: \"db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd\") " pod="openshift-ovn-kubernetes/ovnkube-node-z697t" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.607927 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-z697t" Nov 28 11:20:44 crc kubenswrapper[4923]: W1128 11:20:44.646554 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddb4bdc96_a7b8_45ba_b53f_90f1dafe9bcd.slice/crio-2c2e982d945f98a44fefcef2c36d01d9e52c2804ac2aca342058637adbeac902 WatchSource:0}: Error finding container 2c2e982d945f98a44fefcef2c36d01d9e52c2804ac2aca342058637adbeac902: Status 404 returned error can't find the container with id 2c2e982d945f98a44fefcef2c36d01d9e52c2804ac2aca342058637adbeac902 Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.814156 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-68dth_08e03349-56fc-4b2d-93d3-cf2405a4b7ad/ovnkube-controller/3.log" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.819683 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-68dth_08e03349-56fc-4b2d-93d3-cf2405a4b7ad/ovn-acl-logging/0.log" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.820635 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-68dth_08e03349-56fc-4b2d-93d3-cf2405a4b7ad/ovn-controller/0.log" Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.821317 4923 generic.go:334] "Generic (PLEG): container finished" podID="08e03349-56fc-4b2d-93d3-cf2405a4b7ad" containerID="4173e729eed9162f6bf7b08d2ce1e2432fe973fa48a87354e9ae9d0057caf297" exitCode=0 Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.821346 4923 generic.go:334] "Generic (PLEG): container finished" podID="08e03349-56fc-4b2d-93d3-cf2405a4b7ad" containerID="b7c2e3f2c83ec1b586a9478fb8d23caccab36a0fe08a3f0907a7b0cb2e67af65" exitCode=0 Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.821364 4923 generic.go:334] "Generic (PLEG): container finished" podID="08e03349-56fc-4b2d-93d3-cf2405a4b7ad" containerID="88bb4ac52c4706ca3d80080efb31eff071b89651d1a474b4c0c11ed5559ee7a4" exitCode=0 Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.821377 4923 generic.go:334] "Generic (PLEG): container finished" podID="08e03349-56fc-4b2d-93d3-cf2405a4b7ad" containerID="b7b206747c810fe48a3d4269cdf80dce693f2d075510aabb42ef2c6dbbea97e7" exitCode=0 Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.821392 4923 generic.go:334] "Generic (PLEG): container finished" podID="08e03349-56fc-4b2d-93d3-cf2405a4b7ad" containerID="b3c01dc5b138b3d245898dd4a01c5e81350afe6fabfe9e0333589cd9439d4017" exitCode=0 Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.821405 4923 generic.go:334] "Generic (PLEG): container finished" podID="08e03349-56fc-4b2d-93d3-cf2405a4b7ad" containerID="9ee3c047cb59b98c8394618e6194fc477b983a7039581951378c69698b307ee7" exitCode=0 Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.821419 4923 generic.go:334] "Generic (PLEG): container finished" podID="08e03349-56fc-4b2d-93d3-cf2405a4b7ad" containerID="a7489bfb225a27d96b70124820fb1924580c08b3355ef948335f881d7646a8a3" exitCode=143 Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.821433 4923 generic.go:334] "Generic (PLEG): container finished" podID="08e03349-56fc-4b2d-93d3-cf2405a4b7ad" containerID="4bc7c6e0b076f04ba7810c82578147a9a3af59d3393e8effb111c299583aa6de" exitCode=143 Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.821493 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" 
event={"ID":"08e03349-56fc-4b2d-93d3-cf2405a4b7ad","Type":"ContainerDied","Data":"4173e729eed9162f6bf7b08d2ce1e2432fe973fa48a87354e9ae9d0057caf297"} Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.821535 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" event={"ID":"08e03349-56fc-4b2d-93d3-cf2405a4b7ad","Type":"ContainerDied","Data":"b7c2e3f2c83ec1b586a9478fb8d23caccab36a0fe08a3f0907a7b0cb2e67af65"} Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.821556 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" event={"ID":"08e03349-56fc-4b2d-93d3-cf2405a4b7ad","Type":"ContainerDied","Data":"88bb4ac52c4706ca3d80080efb31eff071b89651d1a474b4c0c11ed5559ee7a4"} Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.821574 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" event={"ID":"08e03349-56fc-4b2d-93d3-cf2405a4b7ad","Type":"ContainerDied","Data":"b7b206747c810fe48a3d4269cdf80dce693f2d075510aabb42ef2c6dbbea97e7"} Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.821592 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" event={"ID":"08e03349-56fc-4b2d-93d3-cf2405a4b7ad","Type":"ContainerDied","Data":"b3c01dc5b138b3d245898dd4a01c5e81350afe6fabfe9e0333589cd9439d4017"} Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.821615 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" event={"ID":"08e03349-56fc-4b2d-93d3-cf2405a4b7ad","Type":"ContainerDied","Data":"9ee3c047cb59b98c8394618e6194fc477b983a7039581951378c69698b307ee7"} Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.821633 4923 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"38c546a3fbd5195bd0602ef14f92ec2bfd832e3f46cce2b709483a90a97e1611"} Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.821651 4923 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"b7c2e3f2c83ec1b586a9478fb8d23caccab36a0fe08a3f0907a7b0cb2e67af65"} Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.821662 4923 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"88bb4ac52c4706ca3d80080efb31eff071b89651d1a474b4c0c11ed5559ee7a4"} Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.821674 4923 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"b7b206747c810fe48a3d4269cdf80dce693f2d075510aabb42ef2c6dbbea97e7"} Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.821685 4923 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"b3c01dc5b138b3d245898dd4a01c5e81350afe6fabfe9e0333589cd9439d4017"} Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.821696 4923 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9ee3c047cb59b98c8394618e6194fc477b983a7039581951378c69698b307ee7"} Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.821707 4923 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"a7489bfb225a27d96b70124820fb1924580c08b3355ef948335f881d7646a8a3"} Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.821717 4923 
pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4bc7c6e0b076f04ba7810c82578147a9a3af59d3393e8effb111c299583aa6de"}
Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.821727 4923 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f"}
Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.821741 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" event={"ID":"08e03349-56fc-4b2d-93d3-cf2405a4b7ad","Type":"ContainerDied","Data":"a7489bfb225a27d96b70124820fb1924580c08b3355ef948335f881d7646a8a3"}
Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.821757 4923 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4173e729eed9162f6bf7b08d2ce1e2432fe973fa48a87354e9ae9d0057caf297"}
Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.821769 4923 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"38c546a3fbd5195bd0602ef14f92ec2bfd832e3f46cce2b709483a90a97e1611"}
Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.821779 4923 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"b7c2e3f2c83ec1b586a9478fb8d23caccab36a0fe08a3f0907a7b0cb2e67af65"}
Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.821790 4923 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"88bb4ac52c4706ca3d80080efb31eff071b89651d1a474b4c0c11ed5559ee7a4"}
Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.821800 4923 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"b7b206747c810fe48a3d4269cdf80dce693f2d075510aabb42ef2c6dbbea97e7"}
Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.821811 4923 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"b3c01dc5b138b3d245898dd4a01c5e81350afe6fabfe9e0333589cd9439d4017"}
Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.821823 4923 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9ee3c047cb59b98c8394618e6194fc477b983a7039581951378c69698b307ee7"}
Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.821833 4923 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"a7489bfb225a27d96b70124820fb1924580c08b3355ef948335f881d7646a8a3"}
Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.821843 4923 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4bc7c6e0b076f04ba7810c82578147a9a3af59d3393e8effb111c299583aa6de"}
Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.821856 4923 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f"}
Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.821871 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" event={"ID":"08e03349-56fc-4b2d-93d3-cf2405a4b7ad","Type":"ContainerDied","Data":"4bc7c6e0b076f04ba7810c82578147a9a3af59d3393e8effb111c299583aa6de"}
Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.821886 4923 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4173e729eed9162f6bf7b08d2ce1e2432fe973fa48a87354e9ae9d0057caf297"}
Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.821899 4923 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"38c546a3fbd5195bd0602ef14f92ec2bfd832e3f46cce2b709483a90a97e1611"}
Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.821910 4923 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"b7c2e3f2c83ec1b586a9478fb8d23caccab36a0fe08a3f0907a7b0cb2e67af65"}
Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.821920 4923 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"88bb4ac52c4706ca3d80080efb31eff071b89651d1a474b4c0c11ed5559ee7a4"}
Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.821963 4923 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"b7b206747c810fe48a3d4269cdf80dce693f2d075510aabb42ef2c6dbbea97e7"}
Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.821975 4923 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"b3c01dc5b138b3d245898dd4a01c5e81350afe6fabfe9e0333589cd9439d4017"}
Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.821988 4923 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9ee3c047cb59b98c8394618e6194fc477b983a7039581951378c69698b307ee7"}
Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.822002 4923 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"a7489bfb225a27d96b70124820fb1924580c08b3355ef948335f881d7646a8a3"}
Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.822015 4923 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4bc7c6e0b076f04ba7810c82578147a9a3af59d3393e8effb111c299583aa6de"}
Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.822030 4923 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f"}
Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.822051 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-68dth" event={"ID":"08e03349-56fc-4b2d-93d3-cf2405a4b7ad","Type":"ContainerDied","Data":"8407acc2a5d77884103b79f7b2c84a1e5ab69c4c0f6379e4e358d33b72c0c070"}
Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.822075 4923 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4173e729eed9162f6bf7b08d2ce1e2432fe973fa48a87354e9ae9d0057caf297"}
Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.822091 4923 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"38c546a3fbd5195bd0602ef14f92ec2bfd832e3f46cce2b709483a90a97e1611"}
Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.822105 4923 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"b7c2e3f2c83ec1b586a9478fb8d23caccab36a0fe08a3f0907a7b0cb2e67af65"}
Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.822120 4923 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"88bb4ac52c4706ca3d80080efb31eff071b89651d1a474b4c0c11ed5559ee7a4"}
Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.822133 4923 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"b7b206747c810fe48a3d4269cdf80dce693f2d075510aabb42ef2c6dbbea97e7"}
Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.822146 4923 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"b3c01dc5b138b3d245898dd4a01c5e81350afe6fabfe9e0333589cd9439d4017"}
Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.822161 4923 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9ee3c047cb59b98c8394618e6194fc477b983a7039581951378c69698b307ee7"}
Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.822173 4923 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"a7489bfb225a27d96b70124820fb1924580c08b3355ef948335f881d7646a8a3"}
Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.822186 4923 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4bc7c6e0b076f04ba7810c82578147a9a3af59d3393e8effb111c299583aa6de"}
Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.822200 4923 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f"}
Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.822227 4923 scope.go:117] "RemoveContainer" containerID="4173e729eed9162f6bf7b08d2ce1e2432fe973fa48a87354e9ae9d0057caf297"
Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.822419 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-68dth"
Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.832473 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z697t" event={"ID":"db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd","Type":"ContainerStarted","Data":"a3a48b12c403890029dc2485155a3ab3566b0cc59d93563fb6796d20ae9b408d"}
Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.832536 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z697t" event={"ID":"db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd","Type":"ContainerStarted","Data":"2c2e982d945f98a44fefcef2c36d01d9e52c2804ac2aca342058637adbeac902"}
Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.835644 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-h5s2m_84374038-67ce-4dc0-a2c2-6eed9650c604/kube-multus/2.log"
Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.921852 4923 scope.go:117] "RemoveContainer" containerID="38c546a3fbd5195bd0602ef14f92ec2bfd832e3f46cce2b709483a90a97e1611"
Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.931685 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-68dth"]
Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.935142 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-68dth"]
Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.950084 4923 scope.go:117] "RemoveContainer" containerID="b7c2e3f2c83ec1b586a9478fb8d23caccab36a0fe08a3f0907a7b0cb2e67af65"
Nov 28 11:20:44 crc kubenswrapper[4923]: I1128 11:20:44.991496 4923 scope.go:117] "RemoveContainer" containerID="88bb4ac52c4706ca3d80080efb31eff071b89651d1a474b4c0c11ed5559ee7a4"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.007674 4923 scope.go:117] "RemoveContainer" containerID="b7b206747c810fe48a3d4269cdf80dce693f2d075510aabb42ef2c6dbbea97e7"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.029547 4923 scope.go:117] "RemoveContainer" containerID="b3c01dc5b138b3d245898dd4a01c5e81350afe6fabfe9e0333589cd9439d4017"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.046576 4923 scope.go:117] "RemoveContainer" containerID="9ee3c047cb59b98c8394618e6194fc477b983a7039581951378c69698b307ee7"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.071072 4923 scope.go:117] "RemoveContainer" containerID="a7489bfb225a27d96b70124820fb1924580c08b3355ef948335f881d7646a8a3"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.090258 4923 scope.go:117] "RemoveContainer" containerID="4bc7c6e0b076f04ba7810c82578147a9a3af59d3393e8effb111c299583aa6de"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.102165 4923 scope.go:117] "RemoveContainer" containerID="18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.113739 4923 scope.go:117] "RemoveContainer" containerID="4173e729eed9162f6bf7b08d2ce1e2432fe973fa48a87354e9ae9d0057caf297"
Nov 28 11:20:45 crc kubenswrapper[4923]: E1128 11:20:45.114199 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4173e729eed9162f6bf7b08d2ce1e2432fe973fa48a87354e9ae9d0057caf297\": container with ID starting with 4173e729eed9162f6bf7b08d2ce1e2432fe973fa48a87354e9ae9d0057caf297 not found: ID does not exist" containerID="4173e729eed9162f6bf7b08d2ce1e2432fe973fa48a87354e9ae9d0057caf297"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.114223 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4173e729eed9162f6bf7b08d2ce1e2432fe973fa48a87354e9ae9d0057caf297"} err="failed to get container status \"4173e729eed9162f6bf7b08d2ce1e2432fe973fa48a87354e9ae9d0057caf297\": rpc error: code = NotFound desc = could not find container \"4173e729eed9162f6bf7b08d2ce1e2432fe973fa48a87354e9ae9d0057caf297\": container with ID starting with 4173e729eed9162f6bf7b08d2ce1e2432fe973fa48a87354e9ae9d0057caf297 not found: ID does not exist"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.114248 4923 scope.go:117] "RemoveContainer" containerID="38c546a3fbd5195bd0602ef14f92ec2bfd832e3f46cce2b709483a90a97e1611"
Nov 28 11:20:45 crc kubenswrapper[4923]: E1128 11:20:45.114505 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"38c546a3fbd5195bd0602ef14f92ec2bfd832e3f46cce2b709483a90a97e1611\": container with ID starting with 38c546a3fbd5195bd0602ef14f92ec2bfd832e3f46cce2b709483a90a97e1611 not found: ID does not exist" containerID="38c546a3fbd5195bd0602ef14f92ec2bfd832e3f46cce2b709483a90a97e1611"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.114523 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"38c546a3fbd5195bd0602ef14f92ec2bfd832e3f46cce2b709483a90a97e1611"} err="failed to get container status \"38c546a3fbd5195bd0602ef14f92ec2bfd832e3f46cce2b709483a90a97e1611\": rpc error: code = NotFound desc = could not find container \"38c546a3fbd5195bd0602ef14f92ec2bfd832e3f46cce2b709483a90a97e1611\": container with ID starting with 38c546a3fbd5195bd0602ef14f92ec2bfd832e3f46cce2b709483a90a97e1611 not found: ID does not exist"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.114535 4923 scope.go:117] "RemoveContainer" containerID="b7c2e3f2c83ec1b586a9478fb8d23caccab36a0fe08a3f0907a7b0cb2e67af65"
Nov 28 11:20:45 crc kubenswrapper[4923]: E1128 11:20:45.114782 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b7c2e3f2c83ec1b586a9478fb8d23caccab36a0fe08a3f0907a7b0cb2e67af65\": container with ID starting with b7c2e3f2c83ec1b586a9478fb8d23caccab36a0fe08a3f0907a7b0cb2e67af65 not found: ID does not exist" containerID="b7c2e3f2c83ec1b586a9478fb8d23caccab36a0fe08a3f0907a7b0cb2e67af65"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.114801 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b7c2e3f2c83ec1b586a9478fb8d23caccab36a0fe08a3f0907a7b0cb2e67af65"} err="failed to get container status \"b7c2e3f2c83ec1b586a9478fb8d23caccab36a0fe08a3f0907a7b0cb2e67af65\": rpc error: code = NotFound desc = could not find container \"b7c2e3f2c83ec1b586a9478fb8d23caccab36a0fe08a3f0907a7b0cb2e67af65\": container with ID starting with b7c2e3f2c83ec1b586a9478fb8d23caccab36a0fe08a3f0907a7b0cb2e67af65 not found: ID does not exist"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.114814 4923 scope.go:117] "RemoveContainer" containerID="88bb4ac52c4706ca3d80080efb31eff071b89651d1a474b4c0c11ed5559ee7a4"
Nov 28 11:20:45 crc kubenswrapper[4923]: E1128 11:20:45.115085 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"88bb4ac52c4706ca3d80080efb31eff071b89651d1a474b4c0c11ed5559ee7a4\": container with ID starting with 88bb4ac52c4706ca3d80080efb31eff071b89651d1a474b4c0c11ed5559ee7a4 not found: ID does not exist" containerID="88bb4ac52c4706ca3d80080efb31eff071b89651d1a474b4c0c11ed5559ee7a4"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.115106 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"88bb4ac52c4706ca3d80080efb31eff071b89651d1a474b4c0c11ed5559ee7a4"} err="failed to get container status \"88bb4ac52c4706ca3d80080efb31eff071b89651d1a474b4c0c11ed5559ee7a4\": rpc error: code = NotFound desc = could not find container \"88bb4ac52c4706ca3d80080efb31eff071b89651d1a474b4c0c11ed5559ee7a4\": container with ID starting with 88bb4ac52c4706ca3d80080efb31eff071b89651d1a474b4c0c11ed5559ee7a4 not found: ID does not exist"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.115119 4923 scope.go:117] "RemoveContainer" containerID="b7b206747c810fe48a3d4269cdf80dce693f2d075510aabb42ef2c6dbbea97e7"
Nov 28 11:20:45 crc kubenswrapper[4923]: E1128 11:20:45.115357 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b7b206747c810fe48a3d4269cdf80dce693f2d075510aabb42ef2c6dbbea97e7\": container with ID starting with b7b206747c810fe48a3d4269cdf80dce693f2d075510aabb42ef2c6dbbea97e7 not found: ID does not exist" containerID="b7b206747c810fe48a3d4269cdf80dce693f2d075510aabb42ef2c6dbbea97e7"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.115376 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b7b206747c810fe48a3d4269cdf80dce693f2d075510aabb42ef2c6dbbea97e7"} err="failed to get container status \"b7b206747c810fe48a3d4269cdf80dce693f2d075510aabb42ef2c6dbbea97e7\": rpc error: code = NotFound desc = could not find container \"b7b206747c810fe48a3d4269cdf80dce693f2d075510aabb42ef2c6dbbea97e7\": container with ID starting with b7b206747c810fe48a3d4269cdf80dce693f2d075510aabb42ef2c6dbbea97e7 not found: ID does not exist"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.115388 4923 scope.go:117] "RemoveContainer" containerID="b3c01dc5b138b3d245898dd4a01c5e81350afe6fabfe9e0333589cd9439d4017"
Nov 28 11:20:45 crc kubenswrapper[4923]: E1128 11:20:45.115599 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b3c01dc5b138b3d245898dd4a01c5e81350afe6fabfe9e0333589cd9439d4017\": container with ID starting with b3c01dc5b138b3d245898dd4a01c5e81350afe6fabfe9e0333589cd9439d4017 not found: ID does not exist" containerID="b3c01dc5b138b3d245898dd4a01c5e81350afe6fabfe9e0333589cd9439d4017"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.115615 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b3c01dc5b138b3d245898dd4a01c5e81350afe6fabfe9e0333589cd9439d4017"} err="failed to get container status \"b3c01dc5b138b3d245898dd4a01c5e81350afe6fabfe9e0333589cd9439d4017\": rpc error: code = NotFound desc = could not find container \"b3c01dc5b138b3d245898dd4a01c5e81350afe6fabfe9e0333589cd9439d4017\": container with ID starting with b3c01dc5b138b3d245898dd4a01c5e81350afe6fabfe9e0333589cd9439d4017 not found: ID does not exist"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.115627 4923 scope.go:117] "RemoveContainer" containerID="9ee3c047cb59b98c8394618e6194fc477b983a7039581951378c69698b307ee7"
Nov 28 11:20:45 crc kubenswrapper[4923]: E1128 11:20:45.115825 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9ee3c047cb59b98c8394618e6194fc477b983a7039581951378c69698b307ee7\": container with ID starting with 9ee3c047cb59b98c8394618e6194fc477b983a7039581951378c69698b307ee7 not found: ID does not exist" containerID="9ee3c047cb59b98c8394618e6194fc477b983a7039581951378c69698b307ee7"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.115843 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9ee3c047cb59b98c8394618e6194fc477b983a7039581951378c69698b307ee7"} err="failed to get container status \"9ee3c047cb59b98c8394618e6194fc477b983a7039581951378c69698b307ee7\": rpc error: code = NotFound desc = could not find container \"9ee3c047cb59b98c8394618e6194fc477b983a7039581951378c69698b307ee7\": container with ID starting with 9ee3c047cb59b98c8394618e6194fc477b983a7039581951378c69698b307ee7 not found: ID does not exist"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.115854 4923 scope.go:117] "RemoveContainer" containerID="a7489bfb225a27d96b70124820fb1924580c08b3355ef948335f881d7646a8a3"
Nov 28 11:20:45 crc kubenswrapper[4923]: E1128 11:20:45.116024 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a7489bfb225a27d96b70124820fb1924580c08b3355ef948335f881d7646a8a3\": container with ID starting with a7489bfb225a27d96b70124820fb1924580c08b3355ef948335f881d7646a8a3 not found: ID does not exist" containerID="a7489bfb225a27d96b70124820fb1924580c08b3355ef948335f881d7646a8a3"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.116044 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a7489bfb225a27d96b70124820fb1924580c08b3355ef948335f881d7646a8a3"} err="failed to get container status \"a7489bfb225a27d96b70124820fb1924580c08b3355ef948335f881d7646a8a3\": rpc error: code = NotFound desc = could not find container \"a7489bfb225a27d96b70124820fb1924580c08b3355ef948335f881d7646a8a3\": container with ID starting with a7489bfb225a27d96b70124820fb1924580c08b3355ef948335f881d7646a8a3 not found: ID does not exist"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.116056 4923 scope.go:117] "RemoveContainer" containerID="4bc7c6e0b076f04ba7810c82578147a9a3af59d3393e8effb111c299583aa6de"
Nov 28 11:20:45 crc kubenswrapper[4923]: E1128 11:20:45.116281 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4bc7c6e0b076f04ba7810c82578147a9a3af59d3393e8effb111c299583aa6de\": container with ID starting with 4bc7c6e0b076f04ba7810c82578147a9a3af59d3393e8effb111c299583aa6de not found: ID does not exist" containerID="4bc7c6e0b076f04ba7810c82578147a9a3af59d3393e8effb111c299583aa6de"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.116301 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4bc7c6e0b076f04ba7810c82578147a9a3af59d3393e8effb111c299583aa6de"} err="failed to get container status \"4bc7c6e0b076f04ba7810c82578147a9a3af59d3393e8effb111c299583aa6de\": rpc error: code = NotFound desc = could not find container \"4bc7c6e0b076f04ba7810c82578147a9a3af59d3393e8effb111c299583aa6de\": container with ID starting with 4bc7c6e0b076f04ba7810c82578147a9a3af59d3393e8effb111c299583aa6de not found: ID does not exist"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.116314 4923 scope.go:117] "RemoveContainer" containerID="18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f"
Nov 28 11:20:45 crc kubenswrapper[4923]: E1128 11:20:45.116547 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f\": container with ID starting with 18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f not found: ID does not exist" containerID="18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.116563 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f"} err="failed to get container status \"18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f\": rpc error: code = NotFound desc = could not find container \"18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f\": container with ID starting with 18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f not found: ID does not exist"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.116575 4923 scope.go:117] "RemoveContainer" containerID="4173e729eed9162f6bf7b08d2ce1e2432fe973fa48a87354e9ae9d0057caf297"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.116754 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4173e729eed9162f6bf7b08d2ce1e2432fe973fa48a87354e9ae9d0057caf297"} err="failed to get container status \"4173e729eed9162f6bf7b08d2ce1e2432fe973fa48a87354e9ae9d0057caf297\": rpc error: code = NotFound desc = could not find container \"4173e729eed9162f6bf7b08d2ce1e2432fe973fa48a87354e9ae9d0057caf297\": container with ID starting with 4173e729eed9162f6bf7b08d2ce1e2432fe973fa48a87354e9ae9d0057caf297 not found: ID does not exist"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.116767 4923 scope.go:117] "RemoveContainer" containerID="38c546a3fbd5195bd0602ef14f92ec2bfd832e3f46cce2b709483a90a97e1611"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.116963 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"38c546a3fbd5195bd0602ef14f92ec2bfd832e3f46cce2b709483a90a97e1611"} err="failed to get container status \"38c546a3fbd5195bd0602ef14f92ec2bfd832e3f46cce2b709483a90a97e1611\": rpc error: code = NotFound desc = could not find container \"38c546a3fbd5195bd0602ef14f92ec2bfd832e3f46cce2b709483a90a97e1611\": container with ID starting with 38c546a3fbd5195bd0602ef14f92ec2bfd832e3f46cce2b709483a90a97e1611 not found: ID does not exist"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.116979 4923 scope.go:117] "RemoveContainer" containerID="b7c2e3f2c83ec1b586a9478fb8d23caccab36a0fe08a3f0907a7b0cb2e67af65"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.117225 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b7c2e3f2c83ec1b586a9478fb8d23caccab36a0fe08a3f0907a7b0cb2e67af65"} err="failed to get container status \"b7c2e3f2c83ec1b586a9478fb8d23caccab36a0fe08a3f0907a7b0cb2e67af65\": rpc error: code = NotFound desc = could not find container \"b7c2e3f2c83ec1b586a9478fb8d23caccab36a0fe08a3f0907a7b0cb2e67af65\": container with ID starting with b7c2e3f2c83ec1b586a9478fb8d23caccab36a0fe08a3f0907a7b0cb2e67af65 not found: ID does not exist"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.117240 4923 scope.go:117] "RemoveContainer" containerID="88bb4ac52c4706ca3d80080efb31eff071b89651d1a474b4c0c11ed5559ee7a4"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.117527 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"88bb4ac52c4706ca3d80080efb31eff071b89651d1a474b4c0c11ed5559ee7a4"} err="failed to get container status \"88bb4ac52c4706ca3d80080efb31eff071b89651d1a474b4c0c11ed5559ee7a4\": rpc error: code = NotFound desc = could not find container \"88bb4ac52c4706ca3d80080efb31eff071b89651d1a474b4c0c11ed5559ee7a4\": container with ID starting with 88bb4ac52c4706ca3d80080efb31eff071b89651d1a474b4c0c11ed5559ee7a4 not found: ID does not exist"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.117542 4923 scope.go:117] "RemoveContainer" containerID="b7b206747c810fe48a3d4269cdf80dce693f2d075510aabb42ef2c6dbbea97e7"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.117721 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b7b206747c810fe48a3d4269cdf80dce693f2d075510aabb42ef2c6dbbea97e7"} err="failed to get container status \"b7b206747c810fe48a3d4269cdf80dce693f2d075510aabb42ef2c6dbbea97e7\": rpc error: code = NotFound desc = could not find container \"b7b206747c810fe48a3d4269cdf80dce693f2d075510aabb42ef2c6dbbea97e7\": container with ID starting with b7b206747c810fe48a3d4269cdf80dce693f2d075510aabb42ef2c6dbbea97e7 not found: ID does not exist"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.117737 4923 scope.go:117] "RemoveContainer" containerID="b3c01dc5b138b3d245898dd4a01c5e81350afe6fabfe9e0333589cd9439d4017"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.118173 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b3c01dc5b138b3d245898dd4a01c5e81350afe6fabfe9e0333589cd9439d4017"} err="failed to get container status \"b3c01dc5b138b3d245898dd4a01c5e81350afe6fabfe9e0333589cd9439d4017\": rpc error: code = NotFound desc = could not find container \"b3c01dc5b138b3d245898dd4a01c5e81350afe6fabfe9e0333589cd9439d4017\": container with ID starting with b3c01dc5b138b3d245898dd4a01c5e81350afe6fabfe9e0333589cd9439d4017 not found: ID does not exist"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.118193 4923 scope.go:117] "RemoveContainer" containerID="9ee3c047cb59b98c8394618e6194fc477b983a7039581951378c69698b307ee7"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.118359 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9ee3c047cb59b98c8394618e6194fc477b983a7039581951378c69698b307ee7"} err="failed to get container status \"9ee3c047cb59b98c8394618e6194fc477b983a7039581951378c69698b307ee7\": rpc error: code = NotFound desc = could not find container \"9ee3c047cb59b98c8394618e6194fc477b983a7039581951378c69698b307ee7\": container with ID starting with 9ee3c047cb59b98c8394618e6194fc477b983a7039581951378c69698b307ee7 not found: ID does not exist"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.118376 4923 scope.go:117] "RemoveContainer" containerID="a7489bfb225a27d96b70124820fb1924580c08b3355ef948335f881d7646a8a3"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.118574 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a7489bfb225a27d96b70124820fb1924580c08b3355ef948335f881d7646a8a3"} err="failed to get container status \"a7489bfb225a27d96b70124820fb1924580c08b3355ef948335f881d7646a8a3\": rpc error: code = NotFound desc = could not find container \"a7489bfb225a27d96b70124820fb1924580c08b3355ef948335f881d7646a8a3\": container with ID starting with a7489bfb225a27d96b70124820fb1924580c08b3355ef948335f881d7646a8a3 not found: ID does not exist"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.118592 4923 scope.go:117] "RemoveContainer" containerID="4bc7c6e0b076f04ba7810c82578147a9a3af59d3393e8effb111c299583aa6de"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.118854 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4bc7c6e0b076f04ba7810c82578147a9a3af59d3393e8effb111c299583aa6de"} err="failed to get container status \"4bc7c6e0b076f04ba7810c82578147a9a3af59d3393e8effb111c299583aa6de\": rpc error: code = NotFound desc = could not find container \"4bc7c6e0b076f04ba7810c82578147a9a3af59d3393e8effb111c299583aa6de\": container with ID starting with 4bc7c6e0b076f04ba7810c82578147a9a3af59d3393e8effb111c299583aa6de not found: ID does not exist"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.118872 4923 scope.go:117] "RemoveContainer" containerID="18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.119315 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f"} err="failed to get container status \"18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f\": rpc error: code = NotFound desc = could not find container \"18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f\": container with ID starting with 18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f not found: ID does not exist"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.119339 4923 scope.go:117] "RemoveContainer" containerID="4173e729eed9162f6bf7b08d2ce1e2432fe973fa48a87354e9ae9d0057caf297"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.119581 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4173e729eed9162f6bf7b08d2ce1e2432fe973fa48a87354e9ae9d0057caf297"} err="failed to get container status \"4173e729eed9162f6bf7b08d2ce1e2432fe973fa48a87354e9ae9d0057caf297\": rpc error: code = NotFound desc = could not find container \"4173e729eed9162f6bf7b08d2ce1e2432fe973fa48a87354e9ae9d0057caf297\": container with ID starting with 4173e729eed9162f6bf7b08d2ce1e2432fe973fa48a87354e9ae9d0057caf297 not found: ID does not exist"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.119605 4923 scope.go:117] "RemoveContainer" containerID="38c546a3fbd5195bd0602ef14f92ec2bfd832e3f46cce2b709483a90a97e1611"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.119956 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"38c546a3fbd5195bd0602ef14f92ec2bfd832e3f46cce2b709483a90a97e1611"} err="failed to get container status \"38c546a3fbd5195bd0602ef14f92ec2bfd832e3f46cce2b709483a90a97e1611\": rpc error: code = NotFound desc = could not find container \"38c546a3fbd5195bd0602ef14f92ec2bfd832e3f46cce2b709483a90a97e1611\": container with ID starting with 38c546a3fbd5195bd0602ef14f92ec2bfd832e3f46cce2b709483a90a97e1611 not found: ID does not exist"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.119982 4923 scope.go:117] "RemoveContainer" containerID="b7c2e3f2c83ec1b586a9478fb8d23caccab36a0fe08a3f0907a7b0cb2e67af65"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.120352 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b7c2e3f2c83ec1b586a9478fb8d23caccab36a0fe08a3f0907a7b0cb2e67af65"} err="failed to get container status \"b7c2e3f2c83ec1b586a9478fb8d23caccab36a0fe08a3f0907a7b0cb2e67af65\": rpc error: code = NotFound desc = could not find container \"b7c2e3f2c83ec1b586a9478fb8d23caccab36a0fe08a3f0907a7b0cb2e67af65\": container with ID starting with b7c2e3f2c83ec1b586a9478fb8d23caccab36a0fe08a3f0907a7b0cb2e67af65 not found: ID does not exist"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.120382 4923 scope.go:117] "RemoveContainer" containerID="88bb4ac52c4706ca3d80080efb31eff071b89651d1a474b4c0c11ed5559ee7a4"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.120712 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"88bb4ac52c4706ca3d80080efb31eff071b89651d1a474b4c0c11ed5559ee7a4"} err="failed to get container status \"88bb4ac52c4706ca3d80080efb31eff071b89651d1a474b4c0c11ed5559ee7a4\": rpc error: code = NotFound desc = could not find container \"88bb4ac52c4706ca3d80080efb31eff071b89651d1a474b4c0c11ed5559ee7a4\": container with ID starting with 88bb4ac52c4706ca3d80080efb31eff071b89651d1a474b4c0c11ed5559ee7a4 not found: ID does not exist"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.120738 4923 scope.go:117] "RemoveContainer" containerID="b7b206747c810fe48a3d4269cdf80dce693f2d075510aabb42ef2c6dbbea97e7"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.121097 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b7b206747c810fe48a3d4269cdf80dce693f2d075510aabb42ef2c6dbbea97e7"} err="failed to get container status \"b7b206747c810fe48a3d4269cdf80dce693f2d075510aabb42ef2c6dbbea97e7\": rpc error: code = NotFound desc = could not find container \"b7b206747c810fe48a3d4269cdf80dce693f2d075510aabb42ef2c6dbbea97e7\": container with ID starting with b7b206747c810fe48a3d4269cdf80dce693f2d075510aabb42ef2c6dbbea97e7 not found: ID does not exist"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.121125 4923 scope.go:117] "RemoveContainer" containerID="b3c01dc5b138b3d245898dd4a01c5e81350afe6fabfe9e0333589cd9439d4017"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.121444 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b3c01dc5b138b3d245898dd4a01c5e81350afe6fabfe9e0333589cd9439d4017"} err="failed to get container status \"b3c01dc5b138b3d245898dd4a01c5e81350afe6fabfe9e0333589cd9439d4017\": rpc error: code = NotFound desc = could not find container \"b3c01dc5b138b3d245898dd4a01c5e81350afe6fabfe9e0333589cd9439d4017\": container with ID starting with b3c01dc5b138b3d245898dd4a01c5e81350afe6fabfe9e0333589cd9439d4017 not found: ID does not exist"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.121470 4923 scope.go:117] "RemoveContainer" containerID="9ee3c047cb59b98c8394618e6194fc477b983a7039581951378c69698b307ee7"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.121785 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9ee3c047cb59b98c8394618e6194fc477b983a7039581951378c69698b307ee7"} err="failed to get container status \"9ee3c047cb59b98c8394618e6194fc477b983a7039581951378c69698b307ee7\": rpc error: code = NotFound desc = could not find container \"9ee3c047cb59b98c8394618e6194fc477b983a7039581951378c69698b307ee7\": container with ID starting with 9ee3c047cb59b98c8394618e6194fc477b983a7039581951378c69698b307ee7 not found: ID does not exist"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.121804 4923 scope.go:117] "RemoveContainer" containerID="a7489bfb225a27d96b70124820fb1924580c08b3355ef948335f881d7646a8a3"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.122238 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a7489bfb225a27d96b70124820fb1924580c08b3355ef948335f881d7646a8a3"} err="failed to get container status \"a7489bfb225a27d96b70124820fb1924580c08b3355ef948335f881d7646a8a3\": rpc error: code = NotFound desc = could not find container \"a7489bfb225a27d96b70124820fb1924580c08b3355ef948335f881d7646a8a3\": container with ID starting with a7489bfb225a27d96b70124820fb1924580c08b3355ef948335f881d7646a8a3 not found: ID does not exist"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.122266 4923 scope.go:117] "RemoveContainer" containerID="4bc7c6e0b076f04ba7810c82578147a9a3af59d3393e8effb111c299583aa6de"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.122715 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4bc7c6e0b076f04ba7810c82578147a9a3af59d3393e8effb111c299583aa6de"} err="failed to get container status \"4bc7c6e0b076f04ba7810c82578147a9a3af59d3393e8effb111c299583aa6de\": rpc error: code = NotFound desc = could not find container \"4bc7c6e0b076f04ba7810c82578147a9a3af59d3393e8effb111c299583aa6de\": container with ID starting with 4bc7c6e0b076f04ba7810c82578147a9a3af59d3393e8effb111c299583aa6de not found: ID does not exist"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.122741 4923 scope.go:117] "RemoveContainer" containerID="18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.123133 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f"} err="failed to get container status \"18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f\": rpc error: code = NotFound desc = could not find container \"18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f\": container with ID starting with 18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f not found: ID does not exist"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.123158 4923 scope.go:117] "RemoveContainer" containerID="4173e729eed9162f6bf7b08d2ce1e2432fe973fa48a87354e9ae9d0057caf297"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.123367 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4173e729eed9162f6bf7b08d2ce1e2432fe973fa48a87354e9ae9d0057caf297"} err="failed to get container status \"4173e729eed9162f6bf7b08d2ce1e2432fe973fa48a87354e9ae9d0057caf297\": rpc error: code = NotFound desc = could not find container \"4173e729eed9162f6bf7b08d2ce1e2432fe973fa48a87354e9ae9d0057caf297\": container with ID starting with 4173e729eed9162f6bf7b08d2ce1e2432fe973fa48a87354e9ae9d0057caf297 not found: ID does not exist"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.123392 4923 scope.go:117] "RemoveContainer" containerID="38c546a3fbd5195bd0602ef14f92ec2bfd832e3f46cce2b709483a90a97e1611"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.123619 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"38c546a3fbd5195bd0602ef14f92ec2bfd832e3f46cce2b709483a90a97e1611"} err="failed to get container status \"38c546a3fbd5195bd0602ef14f92ec2bfd832e3f46cce2b709483a90a97e1611\": rpc error: code = NotFound desc = could not find container \"38c546a3fbd5195bd0602ef14f92ec2bfd832e3f46cce2b709483a90a97e1611\": container with ID starting with 38c546a3fbd5195bd0602ef14f92ec2bfd832e3f46cce2b709483a90a97e1611 not found: ID does not exist"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.123642 4923 scope.go:117] "RemoveContainer" containerID="b7c2e3f2c83ec1b586a9478fb8d23caccab36a0fe08a3f0907a7b0cb2e67af65"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.124000 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b7c2e3f2c83ec1b586a9478fb8d23caccab36a0fe08a3f0907a7b0cb2e67af65"} err="failed to get container status \"b7c2e3f2c83ec1b586a9478fb8d23caccab36a0fe08a3f0907a7b0cb2e67af65\": rpc error: code = NotFound desc = could not find container \"b7c2e3f2c83ec1b586a9478fb8d23caccab36a0fe08a3f0907a7b0cb2e67af65\": container with ID starting with b7c2e3f2c83ec1b586a9478fb8d23caccab36a0fe08a3f0907a7b0cb2e67af65 not found: ID does not exist"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.124029 4923 scope.go:117] "RemoveContainer" containerID="88bb4ac52c4706ca3d80080efb31eff071b89651d1a474b4c0c11ed5559ee7a4"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.124351 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"88bb4ac52c4706ca3d80080efb31eff071b89651d1a474b4c0c11ed5559ee7a4"} err="failed to get container status \"88bb4ac52c4706ca3d80080efb31eff071b89651d1a474b4c0c11ed5559ee7a4\": rpc error: code = NotFound desc = could not find container \"88bb4ac52c4706ca3d80080efb31eff071b89651d1a474b4c0c11ed5559ee7a4\": container with ID starting with 88bb4ac52c4706ca3d80080efb31eff071b89651d1a474b4c0c11ed5559ee7a4 not found: ID does not exist"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.124379 4923 scope.go:117] "RemoveContainer" containerID="b7b206747c810fe48a3d4269cdf80dce693f2d075510aabb42ef2c6dbbea97e7"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.124627 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b7b206747c810fe48a3d4269cdf80dce693f2d075510aabb42ef2c6dbbea97e7"} err="failed to get container status \"b7b206747c810fe48a3d4269cdf80dce693f2d075510aabb42ef2c6dbbea97e7\": rpc error: code = NotFound desc = could not find container \"b7b206747c810fe48a3d4269cdf80dce693f2d075510aabb42ef2c6dbbea97e7\": container with ID starting with b7b206747c810fe48a3d4269cdf80dce693f2d075510aabb42ef2c6dbbea97e7 not found: ID does not exist"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.124652 4923 scope.go:117] "RemoveContainer" containerID="b3c01dc5b138b3d245898dd4a01c5e81350afe6fabfe9e0333589cd9439d4017"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.125025 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b3c01dc5b138b3d245898dd4a01c5e81350afe6fabfe9e0333589cd9439d4017"} err="failed to get container status \"b3c01dc5b138b3d245898dd4a01c5e81350afe6fabfe9e0333589cd9439d4017\": rpc error: code = NotFound desc = could not find container \"b3c01dc5b138b3d245898dd4a01c5e81350afe6fabfe9e0333589cd9439d4017\": container with ID starting with b3c01dc5b138b3d245898dd4a01c5e81350afe6fabfe9e0333589cd9439d4017 not found: ID does not exist"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.125049 4923 scope.go:117] "RemoveContainer" containerID="9ee3c047cb59b98c8394618e6194fc477b983a7039581951378c69698b307ee7"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.125336 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9ee3c047cb59b98c8394618e6194fc477b983a7039581951378c69698b307ee7"} err="failed to get container status \"9ee3c047cb59b98c8394618e6194fc477b983a7039581951378c69698b307ee7\": rpc error: code = NotFound desc = could not find container \"9ee3c047cb59b98c8394618e6194fc477b983a7039581951378c69698b307ee7\": container with ID starting with 9ee3c047cb59b98c8394618e6194fc477b983a7039581951378c69698b307ee7 not found: ID does not exist"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.125360 4923 scope.go:117] "RemoveContainer" containerID="a7489bfb225a27d96b70124820fb1924580c08b3355ef948335f881d7646a8a3"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.125591 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a7489bfb225a27d96b70124820fb1924580c08b3355ef948335f881d7646a8a3"} err="failed to get container status \"a7489bfb225a27d96b70124820fb1924580c08b3355ef948335f881d7646a8a3\": rpc error: code = NotFound desc = could not find container \"a7489bfb225a27d96b70124820fb1924580c08b3355ef948335f881d7646a8a3\": container with ID starting with a7489bfb225a27d96b70124820fb1924580c08b3355ef948335f881d7646a8a3 not found: ID does not exist"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.125618 4923 scope.go:117] "RemoveContainer" containerID="4bc7c6e0b076f04ba7810c82578147a9a3af59d3393e8effb111c299583aa6de"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.125982 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4bc7c6e0b076f04ba7810c82578147a9a3af59d3393e8effb111c299583aa6de"} err="failed to get container status \"4bc7c6e0b076f04ba7810c82578147a9a3af59d3393e8effb111c299583aa6de\": rpc error: code = NotFound desc = could not find container \"4bc7c6e0b076f04ba7810c82578147a9a3af59d3393e8effb111c299583aa6de\": container with ID starting with 4bc7c6e0b076f04ba7810c82578147a9a3af59d3393e8effb111c299583aa6de not found: ID does not exist"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.126014 4923 scope.go:117] "RemoveContainer" containerID="18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.126262 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f"} err="failed to get container status \"18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f\": rpc error: code = NotFound desc = could not find container \"18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f\": container with ID starting with 18ff55d5eeff94f2282f2241a2ab4c47c0ac81e3024f7c00b0b0667fabc3e55f not found: ID does not exist"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.126287 4923 scope.go:117] "RemoveContainer" containerID="4173e729eed9162f6bf7b08d2ce1e2432fe973fa48a87354e9ae9d0057caf297"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.126488 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4173e729eed9162f6bf7b08d2ce1e2432fe973fa48a87354e9ae9d0057caf297"} err="failed to get container status \"4173e729eed9162f6bf7b08d2ce1e2432fe973fa48a87354e9ae9d0057caf297\": rpc error: code = NotFound desc = could not find container \"4173e729eed9162f6bf7b08d2ce1e2432fe973fa48a87354e9ae9d0057caf297\": container with ID starting with 4173e729eed9162f6bf7b08d2ce1e2432fe973fa48a87354e9ae9d0057caf297 not found: ID does not exist"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.174454 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="08e03349-56fc-4b2d-93d3-cf2405a4b7ad" path="/var/lib/kubelet/pods/08e03349-56fc-4b2d-93d3-cf2405a4b7ad/volumes"
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.844850 4923 generic.go:334] "Generic (PLEG): container finished" podID="db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd" containerID="a3a48b12c403890029dc2485155a3ab3566b0cc59d93563fb6796d20ae9b408d" exitCode=0
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.845018 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z697t" event={"ID":"db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd","Type":"ContainerDied","Data":"a3a48b12c403890029dc2485155a3ab3566b0cc59d93563fb6796d20ae9b408d"}
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.845231 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z697t" event={"ID":"db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd","Type":"ContainerStarted","Data":"f287c5bb6515f24b594225a420251681047ef7dc5652170125fd14f97b4a3f49"}
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.845250 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z697t" event={"ID":"db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd","Type":"ContainerStarted","Data":"fa4d85ff88dd3781922a3b1b41c4ff644145b37ff1991143ea9bd183117f730e"}
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.845264 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z697t" event={"ID":"db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd","Type":"ContainerStarted","Data":"f35ccb42ac16f6de6510e07f2ac95e8c1896250b1a48bc450cea49d02b47bea5"}
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.845279 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z697t" event={"ID":"db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd","Type":"ContainerStarted","Data":"ddca1400dab534abc747a485606bdefb89e10a26b1f06b9aa7a030cd623d2e28"}
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.845291 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z697t" event={"ID":"db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd","Type":"ContainerStarted","Data":"f7d92296956b84c26b221cf62aec1cc4f3e695b698b971362e023d0e47dbc079"}
Nov 28 11:20:45 crc kubenswrapper[4923]: I1128 11:20:45.845302 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z697t" event={"ID":"db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd","Type":"ContainerStarted","Data":"7bc47e6203fb6a4fc4d398382ece2dc5fd9d8d2664cfbe342a7151ba35a5ab87"}
Nov 28 11:20:47 crc kubenswrapper[4923]: I1128 11:20:47.863816 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z697t" event={"ID":"db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd","Type":"ContainerStarted","Data":"fa785d79db8ee34cf1e4259300eec73936916cdb76483aef627f1c05583f38ab"}
Nov 28 11:20:50 crc kubenswrapper[4923]: I1128 11:20:50.894329 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z697t" event={"ID":"db4bdc96-a7b8-45ba-b53f-90f1dafe9bcd","Type":"ContainerStarted","Data":"2cc9336b20a7f5596f614703eba511a6642d1b80565bbf01117c270659184f00"}
Nov 28 11:20:50 crc kubenswrapper[4923]: I1128 11:20:50.895086 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-z697t"
Nov 28 11:20:50 crc kubenswrapper[4923]: I1128 11:20:50.895111 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-z697t"
Nov 28 11:20:50 crc kubenswrapper[4923]: I1128 11:20:50.927591 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-z697t"
Nov 28 11:20:50 crc kubenswrapper[4923]: I1128 11:20:50.942714 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-z697t" podStartSLOduration=6.942699175 podStartE2EDuration="6.942699175s" podCreationTimestamp="2025-11-28 11:20:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:20:50.940151784 +0000 UTC m=+730.068835994" watchObservedRunningTime="2025-11-28 11:20:50.942699175 +0000 UTC m=+730.071383395"
Nov 28 11:20:51 crc kubenswrapper[4923]: I1128 11:20:51.900685 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-z697t"
Nov 28 11:20:51 crc kubenswrapper[4923]: I1128 11:20:51.927502 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-z697t"
Nov 28 11:20:55 crc kubenswrapper[4923]: I1128 11:20:55.169527 4923 scope.go:117] "RemoveContainer" containerID="4e5d464fbc192436a17d1b829b59f434eeda1bcd59ca123e60356e99ed41be9a"
Nov 28 11:20:55 crc kubenswrapper[4923]: I1128 11:20:55.931682 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-h5s2m_84374038-67ce-4dc0-a2c2-6eed9650c604/kube-multus/2.log"
Nov 28 11:20:55 crc kubenswrapper[4923]: I1128 11:20:55.932010 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-h5s2m" event={"ID":"84374038-67ce-4dc0-a2c2-6eed9650c604","Type":"ContainerStarted","Data":"b4d1381c56c93df7ef03416a962fa28a90a8e7000f79bac2040e4720dde72f13"}
Nov 28 11:21:11 crc kubenswrapper[4923]: I1128 11:21:11.657117 4923 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Nov 28 11:21:14 crc kubenswrapper[4923]: I1128 11:21:14.026528 4923 patch_prober.go:28] interesting pod/machine-config-daemon-bwdth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 11:21:14 crc kubenswrapper[4923]: I1128 11:21:14.028441 4923 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 11:21:14 crc kubenswrapper[4923]: I1128 11:21:14.642724 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-z697t"
Nov 28 11:21:26 crc kubenswrapper[4923]: I1128 11:21:26.567777 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftgb69"]
Nov 28 11:21:26 crc kubenswrapper[4923]: I1128 11:21:26.569701 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftgb69"
Nov 28 11:21:26 crc kubenswrapper[4923]: I1128 11:21:26.572410 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc"
Nov 28 11:21:26 crc kubenswrapper[4923]: I1128 11:21:26.582624 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftgb69"]
Nov 28 11:21:26 crc kubenswrapper[4923]: I1128 11:21:26.655510 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/84d6dae5-7d92-46d7-bc8b-56c31c5900f2-util\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftgb69\" (UID: \"84d6dae5-7d92-46d7-bc8b-56c31c5900f2\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftgb69"
Nov 28 11:21:26 crc kubenswrapper[4923]: I1128 11:21:26.655554 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/84d6dae5-7d92-46d7-bc8b-56c31c5900f2-bundle\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftgb69\" (UID: \"84d6dae5-7d92-46d7-bc8b-56c31c5900f2\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftgb69"
Nov 28 11:21:26 crc kubenswrapper[4923]: I1128 11:21:26.655617 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kjt7j\" (UniqueName: \"kubernetes.io/projected/84d6dae5-7d92-46d7-bc8b-56c31c5900f2-kube-api-access-kjt7j\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftgb69\" (UID: \"84d6dae5-7d92-46d7-bc8b-56c31c5900f2\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftgb69"
Nov 28 11:21:26 crc kubenswrapper[4923]: I1128 11:21:26.757167 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/84d6dae5-7d92-46d7-bc8b-56c31c5900f2-util\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftgb69\" (UID: \"84d6dae5-7d92-46d7-bc8b-56c31c5900f2\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftgb69"
Nov 28 11:21:26 crc kubenswrapper[4923]: I1128 11:21:26.757217 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/84d6dae5-7d92-46d7-bc8b-56c31c5900f2-bundle\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftgb69\" (UID: \"84d6dae5-7d92-46d7-bc8b-56c31c5900f2\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftgb69"
Nov 28 11:21:26 crc kubenswrapper[4923]: I1128 11:21:26.757304 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kjt7j\" (UniqueName: \"kubernetes.io/projected/84d6dae5-7d92-46d7-bc8b-56c31c5900f2-kube-api-access-kjt7j\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftgb69\" (UID: \"84d6dae5-7d92-46d7-bc8b-56c31c5900f2\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftgb69"
Nov 28 11:21:26 crc kubenswrapper[4923]: I1128 11:21:26.757892 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/84d6dae5-7d92-46d7-bc8b-56c31c5900f2-bundle\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftgb69\" (UID: \"84d6dae5-7d92-46d7-bc8b-56c31c5900f2\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftgb69"
Nov 28 11:21:26 crc kubenswrapper[4923]: I1128 11:21:26.757962 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/84d6dae5-7d92-46d7-bc8b-56c31c5900f2-util\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftgb69\" (UID: \"84d6dae5-7d92-46d7-bc8b-56c31c5900f2\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftgb69"
Nov 28 11:21:26 crc kubenswrapper[4923]: I1128 11:21:26.781647 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kjt7j\" (UniqueName: \"kubernetes.io/projected/84d6dae5-7d92-46d7-bc8b-56c31c5900f2-kube-api-access-kjt7j\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftgb69\" (UID: \"84d6dae5-7d92-46d7-bc8b-56c31c5900f2\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftgb69"
Nov 28 11:21:26 crc kubenswrapper[4923]: I1128 11:21:26.885019 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftgb69"
Nov 28 11:21:27 crc kubenswrapper[4923]: I1128 11:21:27.152884 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftgb69"]
Nov 28 11:21:28 crc kubenswrapper[4923]: I1128 11:21:28.161795 4923 generic.go:334] "Generic (PLEG): container finished" podID="84d6dae5-7d92-46d7-bc8b-56c31c5900f2" containerID="84300fbcde35e372c4315901e0d41abef77009094ed713ce09793f3d582dd218" exitCode=0
Nov 28 11:21:28 crc kubenswrapper[4923]: I1128 11:21:28.161964 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftgb69" event={"ID":"84d6dae5-7d92-46d7-bc8b-56c31c5900f2","Type":"ContainerDied","Data":"84300fbcde35e372c4315901e0d41abef77009094ed713ce09793f3d582dd218"}
Nov 28 11:21:28 crc kubenswrapper[4923]: I1128 11:21:28.164082 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftgb69" event={"ID":"84d6dae5-7d92-46d7-bc8b-56c31c5900f2","Type":"ContainerStarted","Data":"0f6db366efd8dd47850c02a9d0894fb1445a5cf9997812cd2ac724ef1d657a22"}
Nov 28 11:21:28 crc kubenswrapper[4923]: I1128 11:21:28.927475 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-7cmr5"]
Nov 28 11:21:28 crc kubenswrapper[4923]: I1128 11:21:28.929432 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-7cmr5"
Nov 28 11:21:28 crc kubenswrapper[4923]: I1128 11:21:28.961872 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-7cmr5"]
Nov 28 11:21:29 crc kubenswrapper[4923]: I1128 11:21:29.089873 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/87586000-11ac-48c2-bee1-b8bb3c840b0e-catalog-content\") pod \"redhat-operators-7cmr5\" (UID: \"87586000-11ac-48c2-bee1-b8bb3c840b0e\") " pod="openshift-marketplace/redhat-operators-7cmr5"
Nov 28 11:21:29 crc kubenswrapper[4923]: I1128 11:21:29.090142 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/87586000-11ac-48c2-bee1-b8bb3c840b0e-utilities\") pod \"redhat-operators-7cmr5\" (UID: \"87586000-11ac-48c2-bee1-b8bb3c840b0e\") " pod="openshift-marketplace/redhat-operators-7cmr5"
Nov 28 11:21:29 crc kubenswrapper[4923]: I1128 11:21:29.090279 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8t8km\" (UniqueName: \"kubernetes.io/projected/87586000-11ac-48c2-bee1-b8bb3c840b0e-kube-api-access-8t8km\") pod \"redhat-operators-7cmr5\" (UID: \"87586000-11ac-48c2-bee1-b8bb3c840b0e\") " pod="openshift-marketplace/redhat-operators-7cmr5"
Nov 28 11:21:29 crc kubenswrapper[4923]: I1128 11:21:29.191509 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8t8km\" (UniqueName: \"kubernetes.io/projected/87586000-11ac-48c2-bee1-b8bb3c840b0e-kube-api-access-8t8km\") pod \"redhat-operators-7cmr5\" (UID: \"87586000-11ac-48c2-bee1-b8bb3c840b0e\") " pod="openshift-marketplace/redhat-operators-7cmr5"
Nov 28 11:21:29 crc kubenswrapper[4923]: I1128 11:21:29.191583 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/87586000-11ac-48c2-bee1-b8bb3c840b0e-catalog-content\") pod \"redhat-operators-7cmr5\" (UID: \"87586000-11ac-48c2-bee1-b8bb3c840b0e\") " pod="openshift-marketplace/redhat-operators-7cmr5"
Nov 28 11:21:29 crc kubenswrapper[4923]: I1128 11:21:29.191648 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/87586000-11ac-48c2-bee1-b8bb3c840b0e-utilities\") pod \"redhat-operators-7cmr5\" (UID: \"87586000-11ac-48c2-bee1-b8bb3c840b0e\") " pod="openshift-marketplace/redhat-operators-7cmr5"
Nov 28 11:21:29 crc kubenswrapper[4923]: I1128 11:21:29.192149 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/87586000-11ac-48c2-bee1-b8bb3c840b0e-catalog-content\") pod \"redhat-operators-7cmr5\" (UID: \"87586000-11ac-48c2-bee1-b8bb3c840b0e\") " pod="openshift-marketplace/redhat-operators-7cmr5"
Nov 28 11:21:29 crc kubenswrapper[4923]: I1128 11:21:29.192204 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/87586000-11ac-48c2-bee1-b8bb3c840b0e-utilities\") pod \"redhat-operators-7cmr5\" (UID: \"87586000-11ac-48c2-bee1-b8bb3c840b0e\") " pod="openshift-marketplace/redhat-operators-7cmr5"
Nov 28 11:21:29 crc kubenswrapper[4923]: I1128 11:21:29.220655 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8t8km\" (UniqueName: \"kubernetes.io/projected/87586000-11ac-48c2-bee1-b8bb3c840b0e-kube-api-access-8t8km\") pod \"redhat-operators-7cmr5\" (UID: \"87586000-11ac-48c2-bee1-b8bb3c840b0e\") " pod="openshift-marketplace/redhat-operators-7cmr5"
Nov 28 11:21:29 crc kubenswrapper[4923]: I1128 11:21:29.254791 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-7cmr5"
Nov 28 11:21:29 crc kubenswrapper[4923]: I1128 11:21:29.452548 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-7cmr5"]
Nov 28 11:21:30 crc kubenswrapper[4923]: I1128 11:21:30.176519 4923 generic.go:334] "Generic (PLEG): container finished" podID="87586000-11ac-48c2-bee1-b8bb3c840b0e" containerID="e1e0e50ca90237f91e8dd73c787155e16f1a76474955e578902633550d24afb5" exitCode=0
Nov 28 11:21:30 crc kubenswrapper[4923]: I1128 11:21:30.176589 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7cmr5" event={"ID":"87586000-11ac-48c2-bee1-b8bb3c840b0e","Type":"ContainerDied","Data":"e1e0e50ca90237f91e8dd73c787155e16f1a76474955e578902633550d24afb5"}
Nov 28 11:21:30 crc kubenswrapper[4923]: I1128 11:21:30.176615 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7cmr5" event={"ID":"87586000-11ac-48c2-bee1-b8bb3c840b0e","Type":"ContainerStarted","Data":"c26183dae36e36929caeb8bd702808304948030868f8f07d5914ca16fed113fb"}
Nov 28 11:21:30 crc kubenswrapper[4923]: I1128 11:21:30.183169 4923 generic.go:334] "Generic (PLEG): container finished" podID="84d6dae5-7d92-46d7-bc8b-56c31c5900f2" containerID="a8e66d5ce1375079ced3c836afe12a994dbd74e4ea0571d8de30bb8681ae5652" exitCode=0
Nov 28 11:21:30 crc kubenswrapper[4923]: I1128 11:21:30.183222 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftgb69" event={"ID":"84d6dae5-7d92-46d7-bc8b-56c31c5900f2","Type":"ContainerDied","Data":"a8e66d5ce1375079ced3c836afe12a994dbd74e4ea0571d8de30bb8681ae5652"}
Nov 28 11:21:31 crc kubenswrapper[4923]: I1128 11:21:31.191510 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7cmr5" event={"ID":"87586000-11ac-48c2-bee1-b8bb3c840b0e","Type":"ContainerStarted","Data":"b304447d367325c0290e2aa69e8a9b377ac66b8eb440967ddb14049054827b63"}
Nov 28 11:21:31 crc kubenswrapper[4923]: I1128 11:21:31.196025 4923 generic.go:334] "Generic (PLEG): container finished" podID="84d6dae5-7d92-46d7-bc8b-56c31c5900f2" containerID="18281a108dc0fdaebd88e20536fb78353763c2e5ee4fd54f5a55632e60d672a4" exitCode=0
Nov 28 11:21:31 crc kubenswrapper[4923]: I1128 11:21:31.196076 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftgb69" event={"ID":"84d6dae5-7d92-46d7-bc8b-56c31c5900f2","Type":"ContainerDied","Data":"18281a108dc0fdaebd88e20536fb78353763c2e5ee4fd54f5a55632e60d672a4"}
Nov 28 11:21:32 crc kubenswrapper[4923]: I1128 11:21:32.206079 4923 generic.go:334] "Generic (PLEG): container finished" podID="87586000-11ac-48c2-bee1-b8bb3c840b0e" containerID="b304447d367325c0290e2aa69e8a9b377ac66b8eb440967ddb14049054827b63" exitCode=0
Nov 28 11:21:32 crc kubenswrapper[4923]: I1128 11:21:32.207347 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7cmr5" 
event={"ID":"87586000-11ac-48c2-bee1-b8bb3c840b0e","Type":"ContainerDied","Data":"b304447d367325c0290e2aa69e8a9b377ac66b8eb440967ddb14049054827b63"} Nov 28 11:21:32 crc kubenswrapper[4923]: I1128 11:21:32.529913 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftgb69" Nov 28 11:21:32 crc kubenswrapper[4923]: I1128 11:21:32.642058 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kjt7j\" (UniqueName: \"kubernetes.io/projected/84d6dae5-7d92-46d7-bc8b-56c31c5900f2-kube-api-access-kjt7j\") pod \"84d6dae5-7d92-46d7-bc8b-56c31c5900f2\" (UID: \"84d6dae5-7d92-46d7-bc8b-56c31c5900f2\") " Nov 28 11:21:32 crc kubenswrapper[4923]: I1128 11:21:32.642218 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/84d6dae5-7d92-46d7-bc8b-56c31c5900f2-util\") pod \"84d6dae5-7d92-46d7-bc8b-56c31c5900f2\" (UID: \"84d6dae5-7d92-46d7-bc8b-56c31c5900f2\") " Nov 28 11:21:32 crc kubenswrapper[4923]: I1128 11:21:32.642288 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/84d6dae5-7d92-46d7-bc8b-56c31c5900f2-bundle\") pod \"84d6dae5-7d92-46d7-bc8b-56c31c5900f2\" (UID: \"84d6dae5-7d92-46d7-bc8b-56c31c5900f2\") " Nov 28 11:21:32 crc kubenswrapper[4923]: I1128 11:21:32.643309 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/84d6dae5-7d92-46d7-bc8b-56c31c5900f2-bundle" (OuterVolumeSpecName: "bundle") pod "84d6dae5-7d92-46d7-bc8b-56c31c5900f2" (UID: "84d6dae5-7d92-46d7-bc8b-56c31c5900f2"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:21:32 crc kubenswrapper[4923]: I1128 11:21:32.652973 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/84d6dae5-7d92-46d7-bc8b-56c31c5900f2-kube-api-access-kjt7j" (OuterVolumeSpecName: "kube-api-access-kjt7j") pod "84d6dae5-7d92-46d7-bc8b-56c31c5900f2" (UID: "84d6dae5-7d92-46d7-bc8b-56c31c5900f2"). InnerVolumeSpecName "kube-api-access-kjt7j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:21:32 crc kubenswrapper[4923]: I1128 11:21:32.744868 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kjt7j\" (UniqueName: \"kubernetes.io/projected/84d6dae5-7d92-46d7-bc8b-56c31c5900f2-kube-api-access-kjt7j\") on node \"crc\" DevicePath \"\"" Nov 28 11:21:32 crc kubenswrapper[4923]: I1128 11:21:32.744978 4923 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/84d6dae5-7d92-46d7-bc8b-56c31c5900f2-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 11:21:33 crc kubenswrapper[4923]: I1128 11:21:33.145561 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/84d6dae5-7d92-46d7-bc8b-56c31c5900f2-util" (OuterVolumeSpecName: "util") pod "84d6dae5-7d92-46d7-bc8b-56c31c5900f2" (UID: "84d6dae5-7d92-46d7-bc8b-56c31c5900f2"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:21:33 crc kubenswrapper[4923]: I1128 11:21:33.151259 4923 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/84d6dae5-7d92-46d7-bc8b-56c31c5900f2-util\") on node \"crc\" DevicePath \"\"" Nov 28 11:21:33 crc kubenswrapper[4923]: I1128 11:21:33.229898 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftgb69" event={"ID":"84d6dae5-7d92-46d7-bc8b-56c31c5900f2","Type":"ContainerDied","Data":"0f6db366efd8dd47850c02a9d0894fb1445a5cf9997812cd2ac724ef1d657a22"} Nov 28 11:21:33 crc kubenswrapper[4923]: I1128 11:21:33.230004 4923 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0f6db366efd8dd47850c02a9d0894fb1445a5cf9997812cd2ac724ef1d657a22" Nov 28 11:21:33 crc kubenswrapper[4923]: I1128 11:21:33.230032 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftgb69" Nov 28 11:21:34 crc kubenswrapper[4923]: I1128 11:21:34.245253 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7cmr5" event={"ID":"87586000-11ac-48c2-bee1-b8bb3c840b0e","Type":"ContainerStarted","Data":"9d390380b03f4f3817369ee3320c901bd6d41aaf4254cbc91ae2449cac53a8b9"} Nov 28 11:21:34 crc kubenswrapper[4923]: I1128 11:21:34.274885 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-7cmr5" podStartSLOduration=3.293842832 podStartE2EDuration="6.274857561s" podCreationTimestamp="2025-11-28 11:21:28 +0000 UTC" firstStartedPulling="2025-11-28 11:21:30.177648189 +0000 UTC m=+769.306332389" lastFinishedPulling="2025-11-28 11:21:33.158662868 +0000 UTC m=+772.287347118" observedRunningTime="2025-11-28 11:21:34.271471105 +0000 UTC m=+773.400155345" watchObservedRunningTime="2025-11-28 11:21:34.274857561 +0000 UTC m=+773.403541811" Nov 28 11:21:36 crc kubenswrapper[4923]: I1128 11:21:36.858926 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-operator-5b5b58f5c8-8ng48"] Nov 28 11:21:36 crc kubenswrapper[4923]: E1128 11:21:36.859333 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="84d6dae5-7d92-46d7-bc8b-56c31c5900f2" containerName="extract" Nov 28 11:21:36 crc kubenswrapper[4923]: I1128 11:21:36.859345 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="84d6dae5-7d92-46d7-bc8b-56c31c5900f2" containerName="extract" Nov 28 11:21:36 crc kubenswrapper[4923]: E1128 11:21:36.859355 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="84d6dae5-7d92-46d7-bc8b-56c31c5900f2" containerName="util" Nov 28 11:21:36 crc kubenswrapper[4923]: I1128 11:21:36.859362 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="84d6dae5-7d92-46d7-bc8b-56c31c5900f2" containerName="util" Nov 28 11:21:36 crc kubenswrapper[4923]: E1128 11:21:36.859373 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="84d6dae5-7d92-46d7-bc8b-56c31c5900f2" containerName="pull" Nov 28 11:21:36 crc kubenswrapper[4923]: I1128 11:21:36.859379 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="84d6dae5-7d92-46d7-bc8b-56c31c5900f2" containerName="pull" Nov 28 11:21:36 crc kubenswrapper[4923]: I1128 11:21:36.859474 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="84d6dae5-7d92-46d7-bc8b-56c31c5900f2" 
containerName="extract" Nov 28 11:21:36 crc kubenswrapper[4923]: I1128 11:21:36.859795 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-8ng48" Nov 28 11:21:36 crc kubenswrapper[4923]: I1128 11:21:36.861827 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-69rgg" Nov 28 11:21:36 crc kubenswrapper[4923]: I1128 11:21:36.862566 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt" Nov 28 11:21:36 crc kubenswrapper[4923]: I1128 11:21:36.862892 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt" Nov 28 11:21:36 crc kubenswrapper[4923]: I1128 11:21:36.874217 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-5b5b58f5c8-8ng48"] Nov 28 11:21:36 crc kubenswrapper[4923]: I1128 11:21:36.949320 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-227zv\" (UniqueName: \"kubernetes.io/projected/0aa4d573-fe6f-4c0a-930d-99cd8691c86f-kube-api-access-227zv\") pod \"nmstate-operator-5b5b58f5c8-8ng48\" (UID: \"0aa4d573-fe6f-4c0a-930d-99cd8691c86f\") " pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-8ng48" Nov 28 11:21:37 crc kubenswrapper[4923]: I1128 11:21:37.050583 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-227zv\" (UniqueName: \"kubernetes.io/projected/0aa4d573-fe6f-4c0a-930d-99cd8691c86f-kube-api-access-227zv\") pod \"nmstate-operator-5b5b58f5c8-8ng48\" (UID: \"0aa4d573-fe6f-4c0a-930d-99cd8691c86f\") " pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-8ng48" Nov 28 11:21:37 crc kubenswrapper[4923]: I1128 11:21:37.068451 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-227zv\" (UniqueName: \"kubernetes.io/projected/0aa4d573-fe6f-4c0a-930d-99cd8691c86f-kube-api-access-227zv\") pod \"nmstate-operator-5b5b58f5c8-8ng48\" (UID: \"0aa4d573-fe6f-4c0a-930d-99cd8691c86f\") " pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-8ng48" Nov 28 11:21:37 crc kubenswrapper[4923]: I1128 11:21:37.175361 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-8ng48" Nov 28 11:21:37 crc kubenswrapper[4923]: I1128 11:21:37.414722 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-5b5b58f5c8-8ng48"] Nov 28 11:21:38 crc kubenswrapper[4923]: I1128 11:21:38.272656 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-8ng48" event={"ID":"0aa4d573-fe6f-4c0a-930d-99cd8691c86f","Type":"ContainerStarted","Data":"1940b12563747a425d943857d41cf725b35bf590fda9223de3626b613a62aac6"} Nov 28 11:21:39 crc kubenswrapper[4923]: I1128 11:21:39.255380 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-7cmr5" Nov 28 11:21:39 crc kubenswrapper[4923]: I1128 11:21:39.256214 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-7cmr5" Nov 28 11:21:40 crc kubenswrapper[4923]: I1128 11:21:40.292680 4923 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-7cmr5" podUID="87586000-11ac-48c2-bee1-b8bb3c840b0e" containerName="registry-server" probeResult="failure" output=< Nov 28 11:21:40 crc kubenswrapper[4923]: timeout: failed to connect service ":50051" within 1s Nov 28 11:21:40 crc kubenswrapper[4923]: > Nov 28 11:21:41 crc kubenswrapper[4923]: I1128 11:21:41.299270 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-8ng48" event={"ID":"0aa4d573-fe6f-4c0a-930d-99cd8691c86f","Type":"ContainerStarted","Data":"987bbe0c7a1563179f464977fb3c21ce7567844cb667d8c19d2c583caea1c94f"} Nov 28 11:21:41 crc kubenswrapper[4923]: I1128 11:21:41.328413 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-8ng48" podStartSLOduration=2.630233749 podStartE2EDuration="5.328386795s" podCreationTimestamp="2025-11-28 11:21:36 +0000 UTC" firstStartedPulling="2025-11-28 11:21:37.425907321 +0000 UTC m=+776.554591531" lastFinishedPulling="2025-11-28 11:21:40.124060327 +0000 UTC m=+779.252744577" observedRunningTime="2025-11-28 11:21:41.321435758 +0000 UTC m=+780.450119978" watchObservedRunningTime="2025-11-28 11:21:41.328386795 +0000 UTC m=+780.457071025" Nov 28 11:21:44 crc kubenswrapper[4923]: I1128 11:21:44.027144 4923 patch_prober.go:28] interesting pod/machine-config-daemon-bwdth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 11:21:44 crc kubenswrapper[4923]: I1128 11:21:44.027688 4923 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 11:21:44 crc kubenswrapper[4923]: I1128 11:21:44.027765 4923 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" Nov 28 11:21:44 crc kubenswrapper[4923]: I1128 11:21:44.028783 4923 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" 
containerStatusID={"Type":"cri-o","ID":"677ed572a7b0e83cdbaab7053a3f1b65f579449e7b5bb37190e07948114a0b10"} pod="openshift-machine-config-operator/machine-config-daemon-bwdth" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 11:21:44 crc kubenswrapper[4923]: I1128 11:21:44.028900 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" containerName="machine-config-daemon" containerID="cri-o://677ed572a7b0e83cdbaab7053a3f1b65f579449e7b5bb37190e07948114a0b10" gracePeriod=600 Nov 28 11:21:45 crc kubenswrapper[4923]: I1128 11:21:45.324615 4923 generic.go:334] "Generic (PLEG): container finished" podID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" containerID="677ed572a7b0e83cdbaab7053a3f1b65f579449e7b5bb37190e07948114a0b10" exitCode=0 Nov 28 11:21:45 crc kubenswrapper[4923]: I1128 11:21:45.324691 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" event={"ID":"092566f7-fc7d-4897-a1f2-4ecedcd3058e","Type":"ContainerDied","Data":"677ed572a7b0e83cdbaab7053a3f1b65f579449e7b5bb37190e07948114a0b10"} Nov 28 11:21:45 crc kubenswrapper[4923]: I1128 11:21:45.325057 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" event={"ID":"092566f7-fc7d-4897-a1f2-4ecedcd3058e","Type":"ContainerStarted","Data":"1125a66670947f90cf2e295b500044f466e54c6f2bb9f5eb7e6841beb4d77d04"} Nov 28 11:21:45 crc kubenswrapper[4923]: I1128 11:21:45.325085 4923 scope.go:117] "RemoveContainer" containerID="64a773c89c3c8eb963ad3cd621825622fbc3cbefa6e6e24b0ed07fdb1769cf81" Nov 28 11:21:46 crc kubenswrapper[4923]: I1128 11:21:46.859447 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-metrics-7f946cbc9-pmjlv"] Nov 28 11:21:46 crc kubenswrapper[4923]: I1128 11:21:46.860795 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-pmjlv" Nov 28 11:21:46 crc kubenswrapper[4923]: I1128 11:21:46.864515 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-574r2" Nov 28 11:21:46 crc kubenswrapper[4923]: I1128 11:21:46.870915 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-webhook-5f6d4c5ccb-pns7s"] Nov 28 11:21:46 crc kubenswrapper[4923]: I1128 11:21:46.871834 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-pns7s" Nov 28 11:21:46 crc kubenswrapper[4923]: I1128 11:21:46.875175 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook" Nov 28 11:21:46 crc kubenswrapper[4923]: I1128 11:21:46.880501 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-5f6d4c5ccb-pns7s"] Nov 28 11:21:46 crc kubenswrapper[4923]: I1128 11:21:46.900048 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-handler-8rfxv"] Nov 28 11:21:46 crc kubenswrapper[4923]: I1128 11:21:46.900725 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-handler-8rfxv" Nov 28 11:21:46 crc kubenswrapper[4923]: I1128 11:21:46.958229 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-7f946cbc9-pmjlv"] Nov 28 11:21:46 crc kubenswrapper[4923]: I1128 11:21:46.984562 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/b6f09dc3-c7f4-40fe-862e-badef31718a6-tls-key-pair\") pod \"nmstate-webhook-5f6d4c5ccb-pns7s\" (UID: \"b6f09dc3-c7f4-40fe-862e-badef31718a6\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-pns7s" Nov 28 11:21:46 crc kubenswrapper[4923]: I1128 11:21:46.984615 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/860a9fa6-e760-4c45-a051-4fcf8b6c3fc4-nmstate-lock\") pod \"nmstate-handler-8rfxv\" (UID: \"860a9fa6-e760-4c45-a051-4fcf8b6c3fc4\") " pod="openshift-nmstate/nmstate-handler-8rfxv" Nov 28 11:21:46 crc kubenswrapper[4923]: I1128 11:21:46.984684 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/860a9fa6-e760-4c45-a051-4fcf8b6c3fc4-dbus-socket\") pod \"nmstate-handler-8rfxv\" (UID: \"860a9fa6-e760-4c45-a051-4fcf8b6c3fc4\") " pod="openshift-nmstate/nmstate-handler-8rfxv" Nov 28 11:21:46 crc kubenswrapper[4923]: I1128 11:21:46.984721 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/860a9fa6-e760-4c45-a051-4fcf8b6c3fc4-ovs-socket\") pod \"nmstate-handler-8rfxv\" (UID: \"860a9fa6-e760-4c45-a051-4fcf8b6c3fc4\") " pod="openshift-nmstate/nmstate-handler-8rfxv" Nov 28 11:21:46 crc kubenswrapper[4923]: I1128 11:21:46.984748 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xfs7c\" (UniqueName: \"kubernetes.io/projected/860a9fa6-e760-4c45-a051-4fcf8b6c3fc4-kube-api-access-xfs7c\") pod \"nmstate-handler-8rfxv\" (UID: \"860a9fa6-e760-4c45-a051-4fcf8b6c3fc4\") " pod="openshift-nmstate/nmstate-handler-8rfxv" Nov 28 11:21:46 crc kubenswrapper[4923]: I1128 11:21:46.984781 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5flj9\" (UniqueName: \"kubernetes.io/projected/772d8536-f37b-434f-a5d3-d569c2079591-kube-api-access-5flj9\") pod \"nmstate-metrics-7f946cbc9-pmjlv\" (UID: \"772d8536-f37b-434f-a5d3-d569c2079591\") " pod="openshift-nmstate/nmstate-metrics-7f946cbc9-pmjlv" Nov 28 11:21:46 crc kubenswrapper[4923]: I1128 11:21:46.984858 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r7cwg\" (UniqueName: \"kubernetes.io/projected/b6f09dc3-c7f4-40fe-862e-badef31718a6-kube-api-access-r7cwg\") pod \"nmstate-webhook-5f6d4c5ccb-pns7s\" (UID: \"b6f09dc3-c7f4-40fe-862e-badef31718a6\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-pns7s" Nov 28 11:21:47 crc kubenswrapper[4923]: I1128 11:21:47.026323 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7fbb5f6569-btvd5"] Nov 28 11:21:47 crc kubenswrapper[4923]: I1128 11:21:47.026999 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-btvd5" Nov 28 11:21:47 crc kubenswrapper[4923]: I1128 11:21:47.028579 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert" Nov 28 11:21:47 crc kubenswrapper[4923]: I1128 11:21:47.028675 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-78868" Nov 28 11:21:47 crc kubenswrapper[4923]: I1128 11:21:47.030826 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf" Nov 28 11:21:47 crc kubenswrapper[4923]: I1128 11:21:47.038099 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7fbb5f6569-btvd5"] Nov 28 11:21:47 crc kubenswrapper[4923]: I1128 11:21:47.086010 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/860a9fa6-e760-4c45-a051-4fcf8b6c3fc4-dbus-socket\") pod \"nmstate-handler-8rfxv\" (UID: \"860a9fa6-e760-4c45-a051-4fcf8b6c3fc4\") " pod="openshift-nmstate/nmstate-handler-8rfxv" Nov 28 11:21:47 crc kubenswrapper[4923]: I1128 11:21:47.086056 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/860a9fa6-e760-4c45-a051-4fcf8b6c3fc4-ovs-socket\") pod \"nmstate-handler-8rfxv\" (UID: \"860a9fa6-e760-4c45-a051-4fcf8b6c3fc4\") " pod="openshift-nmstate/nmstate-handler-8rfxv" Nov 28 11:21:47 crc kubenswrapper[4923]: I1128 11:21:47.086081 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xfs7c\" (UniqueName: \"kubernetes.io/projected/860a9fa6-e760-4c45-a051-4fcf8b6c3fc4-kube-api-access-xfs7c\") pod \"nmstate-handler-8rfxv\" (UID: \"860a9fa6-e760-4c45-a051-4fcf8b6c3fc4\") " pod="openshift-nmstate/nmstate-handler-8rfxv" Nov 28 11:21:47 crc kubenswrapper[4923]: I1128 11:21:47.086097 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5flj9\" (UniqueName: \"kubernetes.io/projected/772d8536-f37b-434f-a5d3-d569c2079591-kube-api-access-5flj9\") pod \"nmstate-metrics-7f946cbc9-pmjlv\" (UID: \"772d8536-f37b-434f-a5d3-d569c2079591\") " pod="openshift-nmstate/nmstate-metrics-7f946cbc9-pmjlv" Nov 28 11:21:47 crc kubenswrapper[4923]: I1128 11:21:47.086134 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r7cwg\" (UniqueName: \"kubernetes.io/projected/b6f09dc3-c7f4-40fe-862e-badef31718a6-kube-api-access-r7cwg\") pod \"nmstate-webhook-5f6d4c5ccb-pns7s\" (UID: \"b6f09dc3-c7f4-40fe-862e-badef31718a6\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-pns7s" Nov 28 11:21:47 crc kubenswrapper[4923]: I1128 11:21:47.086148 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/860a9fa6-e760-4c45-a051-4fcf8b6c3fc4-ovs-socket\") pod \"nmstate-handler-8rfxv\" (UID: \"860a9fa6-e760-4c45-a051-4fcf8b6c3fc4\") " pod="openshift-nmstate/nmstate-handler-8rfxv" Nov 28 11:21:47 crc kubenswrapper[4923]: I1128 11:21:47.086160 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/b6f09dc3-c7f4-40fe-862e-badef31718a6-tls-key-pair\") pod \"nmstate-webhook-5f6d4c5ccb-pns7s\" (UID: \"b6f09dc3-c7f4-40fe-862e-badef31718a6\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-pns7s" Nov 28 11:21:47 
crc kubenswrapper[4923]: I1128 11:21:47.086240 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/860a9fa6-e760-4c45-a051-4fcf8b6c3fc4-nmstate-lock\") pod \"nmstate-handler-8rfxv\" (UID: \"860a9fa6-e760-4c45-a051-4fcf8b6c3fc4\") " pod="openshift-nmstate/nmstate-handler-8rfxv" Nov 28 11:21:47 crc kubenswrapper[4923]: I1128 11:21:47.086266 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/860a9fa6-e760-4c45-a051-4fcf8b6c3fc4-dbus-socket\") pod \"nmstate-handler-8rfxv\" (UID: \"860a9fa6-e760-4c45-a051-4fcf8b6c3fc4\") " pod="openshift-nmstate/nmstate-handler-8rfxv" Nov 28 11:21:47 crc kubenswrapper[4923]: I1128 11:21:47.086533 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/860a9fa6-e760-4c45-a051-4fcf8b6c3fc4-nmstate-lock\") pod \"nmstate-handler-8rfxv\" (UID: \"860a9fa6-e760-4c45-a051-4fcf8b6c3fc4\") " pod="openshift-nmstate/nmstate-handler-8rfxv" Nov 28 11:21:47 crc kubenswrapper[4923]: I1128 11:21:47.100266 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/b6f09dc3-c7f4-40fe-862e-badef31718a6-tls-key-pair\") pod \"nmstate-webhook-5f6d4c5ccb-pns7s\" (UID: \"b6f09dc3-c7f4-40fe-862e-badef31718a6\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-pns7s" Nov 28 11:21:47 crc kubenswrapper[4923]: I1128 11:21:47.101814 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5flj9\" (UniqueName: \"kubernetes.io/projected/772d8536-f37b-434f-a5d3-d569c2079591-kube-api-access-5flj9\") pod \"nmstate-metrics-7f946cbc9-pmjlv\" (UID: \"772d8536-f37b-434f-a5d3-d569c2079591\") " pod="openshift-nmstate/nmstate-metrics-7f946cbc9-pmjlv" Nov 28 11:21:47 crc kubenswrapper[4923]: I1128 11:21:47.103867 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r7cwg\" (UniqueName: \"kubernetes.io/projected/b6f09dc3-c7f4-40fe-862e-badef31718a6-kube-api-access-r7cwg\") pod \"nmstate-webhook-5f6d4c5ccb-pns7s\" (UID: \"b6f09dc3-c7f4-40fe-862e-badef31718a6\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-pns7s" Nov 28 11:21:47 crc kubenswrapper[4923]: I1128 11:21:47.104113 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xfs7c\" (UniqueName: \"kubernetes.io/projected/860a9fa6-e760-4c45-a051-4fcf8b6c3fc4-kube-api-access-xfs7c\") pod \"nmstate-handler-8rfxv\" (UID: \"860a9fa6-e760-4c45-a051-4fcf8b6c3fc4\") " pod="openshift-nmstate/nmstate-handler-8rfxv" Nov 28 11:21:47 crc kubenswrapper[4923]: I1128 11:21:47.186879 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kxgqw\" (UniqueName: \"kubernetes.io/projected/66e9ab76-5eff-4f56-8ff0-aabc981b3b0e-kube-api-access-kxgqw\") pod \"nmstate-console-plugin-7fbb5f6569-btvd5\" (UID: \"66e9ab76-5eff-4f56-8ff0-aabc981b3b0e\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-btvd5" Nov 28 11:21:47 crc kubenswrapper[4923]: I1128 11:21:47.187277 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/66e9ab76-5eff-4f56-8ff0-aabc981b3b0e-nginx-conf\") pod \"nmstate-console-plugin-7fbb5f6569-btvd5\" (UID: \"66e9ab76-5eff-4f56-8ff0-aabc981b3b0e\") " 
pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-btvd5" Nov 28 11:21:47 crc kubenswrapper[4923]: I1128 11:21:47.187316 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/66e9ab76-5eff-4f56-8ff0-aabc981b3b0e-plugin-serving-cert\") pod \"nmstate-console-plugin-7fbb5f6569-btvd5\" (UID: \"66e9ab76-5eff-4f56-8ff0-aabc981b3b0e\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-btvd5" Nov 28 11:21:47 crc kubenswrapper[4923]: I1128 11:21:47.216671 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-pmjlv" Nov 28 11:21:47 crc kubenswrapper[4923]: I1128 11:21:47.221152 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-7f7c969bc5-mnnn6"] Nov 28 11:21:47 crc kubenswrapper[4923]: I1128 11:21:47.221751 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-7f7c969bc5-mnnn6" Nov 28 11:21:47 crc kubenswrapper[4923]: I1128 11:21:47.227584 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-pns7s" Nov 28 11:21:47 crc kubenswrapper[4923]: I1128 11:21:47.230360 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-7f7c969bc5-mnnn6"] Nov 28 11:21:47 crc kubenswrapper[4923]: I1128 11:21:47.234668 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-8rfxv" Nov 28 11:21:47 crc kubenswrapper[4923]: I1128 11:21:47.291444 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/e4327280-b4bf-4f80-b06b-b21c80941f81-oauth-serving-cert\") pod \"console-7f7c969bc5-mnnn6\" (UID: \"e4327280-b4bf-4f80-b06b-b21c80941f81\") " pod="openshift-console/console-7f7c969bc5-mnnn6" Nov 28 11:21:47 crc kubenswrapper[4923]: I1128 11:21:47.291479 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/e4327280-b4bf-4f80-b06b-b21c80941f81-service-ca\") pod \"console-7f7c969bc5-mnnn6\" (UID: \"e4327280-b4bf-4f80-b06b-b21c80941f81\") " pod="openshift-console/console-7f7c969bc5-mnnn6" Nov 28 11:21:47 crc kubenswrapper[4923]: I1128 11:21:47.291504 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kxgqw\" (UniqueName: \"kubernetes.io/projected/66e9ab76-5eff-4f56-8ff0-aabc981b3b0e-kube-api-access-kxgqw\") pod \"nmstate-console-plugin-7fbb5f6569-btvd5\" (UID: \"66e9ab76-5eff-4f56-8ff0-aabc981b3b0e\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-btvd5" Nov 28 11:21:47 crc kubenswrapper[4923]: I1128 11:21:47.291539 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9627h\" (UniqueName: \"kubernetes.io/projected/e4327280-b4bf-4f80-b06b-b21c80941f81-kube-api-access-9627h\") pod \"console-7f7c969bc5-mnnn6\" (UID: \"e4327280-b4bf-4f80-b06b-b21c80941f81\") " pod="openshift-console/console-7f7c969bc5-mnnn6" Nov 28 11:21:47 crc kubenswrapper[4923]: I1128 11:21:47.291557 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/e4327280-b4bf-4f80-b06b-b21c80941f81-console-config\") 
pod \"console-7f7c969bc5-mnnn6\" (UID: \"e4327280-b4bf-4f80-b06b-b21c80941f81\") " pod="openshift-console/console-7f7c969bc5-mnnn6" Nov 28 11:21:47 crc kubenswrapper[4923]: I1128 11:21:47.291577 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/66e9ab76-5eff-4f56-8ff0-aabc981b3b0e-nginx-conf\") pod \"nmstate-console-plugin-7fbb5f6569-btvd5\" (UID: \"66e9ab76-5eff-4f56-8ff0-aabc981b3b0e\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-btvd5" Nov 28 11:21:47 crc kubenswrapper[4923]: I1128 11:21:47.291596 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/e4327280-b4bf-4f80-b06b-b21c80941f81-trusted-ca-bundle\") pod \"console-7f7c969bc5-mnnn6\" (UID: \"e4327280-b4bf-4f80-b06b-b21c80941f81\") " pod="openshift-console/console-7f7c969bc5-mnnn6" Nov 28 11:21:47 crc kubenswrapper[4923]: I1128 11:21:47.291611 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/e4327280-b4bf-4f80-b06b-b21c80941f81-console-oauth-config\") pod \"console-7f7c969bc5-mnnn6\" (UID: \"e4327280-b4bf-4f80-b06b-b21c80941f81\") " pod="openshift-console/console-7f7c969bc5-mnnn6" Nov 28 11:21:47 crc kubenswrapper[4923]: I1128 11:21:47.291641 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/66e9ab76-5eff-4f56-8ff0-aabc981b3b0e-plugin-serving-cert\") pod \"nmstate-console-plugin-7fbb5f6569-btvd5\" (UID: \"66e9ab76-5eff-4f56-8ff0-aabc981b3b0e\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-btvd5" Nov 28 11:21:47 crc kubenswrapper[4923]: I1128 11:21:47.291657 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/e4327280-b4bf-4f80-b06b-b21c80941f81-console-serving-cert\") pod \"console-7f7c969bc5-mnnn6\" (UID: \"e4327280-b4bf-4f80-b06b-b21c80941f81\") " pod="openshift-console/console-7f7c969bc5-mnnn6" Nov 28 11:21:47 crc kubenswrapper[4923]: I1128 11:21:47.292740 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/66e9ab76-5eff-4f56-8ff0-aabc981b3b0e-nginx-conf\") pod \"nmstate-console-plugin-7fbb5f6569-btvd5\" (UID: \"66e9ab76-5eff-4f56-8ff0-aabc981b3b0e\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-btvd5" Nov 28 11:21:47 crc kubenswrapper[4923]: I1128 11:21:47.314588 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/66e9ab76-5eff-4f56-8ff0-aabc981b3b0e-plugin-serving-cert\") pod \"nmstate-console-plugin-7fbb5f6569-btvd5\" (UID: \"66e9ab76-5eff-4f56-8ff0-aabc981b3b0e\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-btvd5" Nov 28 11:21:47 crc kubenswrapper[4923]: I1128 11:21:47.316162 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kxgqw\" (UniqueName: \"kubernetes.io/projected/66e9ab76-5eff-4f56-8ff0-aabc981b3b0e-kube-api-access-kxgqw\") pod \"nmstate-console-plugin-7fbb5f6569-btvd5\" (UID: \"66e9ab76-5eff-4f56-8ff0-aabc981b3b0e\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-btvd5" Nov 28 11:21:47 crc kubenswrapper[4923]: I1128 11:21:47.342192 4923 util.go:30] "No sandbox 
for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-btvd5" Nov 28 11:21:47 crc kubenswrapper[4923]: I1128 11:21:47.344172 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-8rfxv" event={"ID":"860a9fa6-e760-4c45-a051-4fcf8b6c3fc4","Type":"ContainerStarted","Data":"3b37b57d8e75c693732bbc89b2f6268dcfa414fcf3b1fa9e1f713dcf680e6906"} Nov 28 11:21:47 crc kubenswrapper[4923]: I1128 11:21:47.393449 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/e4327280-b4bf-4f80-b06b-b21c80941f81-oauth-serving-cert\") pod \"console-7f7c969bc5-mnnn6\" (UID: \"e4327280-b4bf-4f80-b06b-b21c80941f81\") " pod="openshift-console/console-7f7c969bc5-mnnn6" Nov 28 11:21:47 crc kubenswrapper[4923]: I1128 11:21:47.394833 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/e4327280-b4bf-4f80-b06b-b21c80941f81-service-ca\") pod \"console-7f7c969bc5-mnnn6\" (UID: \"e4327280-b4bf-4f80-b06b-b21c80941f81\") " pod="openshift-console/console-7f7c969bc5-mnnn6" Nov 28 11:21:47 crc kubenswrapper[4923]: I1128 11:21:47.395124 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9627h\" (UniqueName: \"kubernetes.io/projected/e4327280-b4bf-4f80-b06b-b21c80941f81-kube-api-access-9627h\") pod \"console-7f7c969bc5-mnnn6\" (UID: \"e4327280-b4bf-4f80-b06b-b21c80941f81\") " pod="openshift-console/console-7f7c969bc5-mnnn6" Nov 28 11:21:47 crc kubenswrapper[4923]: I1128 11:21:47.395156 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/e4327280-b4bf-4f80-b06b-b21c80941f81-console-config\") pod \"console-7f7c969bc5-mnnn6\" (UID: \"e4327280-b4bf-4f80-b06b-b21c80941f81\") " pod="openshift-console/console-7f7c969bc5-mnnn6" Nov 28 11:21:47 crc kubenswrapper[4923]: I1128 11:21:47.395194 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/e4327280-b4bf-4f80-b06b-b21c80941f81-trusted-ca-bundle\") pod \"console-7f7c969bc5-mnnn6\" (UID: \"e4327280-b4bf-4f80-b06b-b21c80941f81\") " pod="openshift-console/console-7f7c969bc5-mnnn6" Nov 28 11:21:47 crc kubenswrapper[4923]: I1128 11:21:47.395220 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/e4327280-b4bf-4f80-b06b-b21c80941f81-console-oauth-config\") pod \"console-7f7c969bc5-mnnn6\" (UID: \"e4327280-b4bf-4f80-b06b-b21c80941f81\") " pod="openshift-console/console-7f7c969bc5-mnnn6" Nov 28 11:21:47 crc kubenswrapper[4923]: I1128 11:21:47.395271 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/e4327280-b4bf-4f80-b06b-b21c80941f81-console-serving-cert\") pod \"console-7f7c969bc5-mnnn6\" (UID: \"e4327280-b4bf-4f80-b06b-b21c80941f81\") " pod="openshift-console/console-7f7c969bc5-mnnn6" Nov 28 11:21:47 crc kubenswrapper[4923]: I1128 11:21:47.394782 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/e4327280-b4bf-4f80-b06b-b21c80941f81-oauth-serving-cert\") pod \"console-7f7c969bc5-mnnn6\" (UID: \"e4327280-b4bf-4f80-b06b-b21c80941f81\") " 
pod="openshift-console/console-7f7c969bc5-mnnn6" Nov 28 11:21:47 crc kubenswrapper[4923]: I1128 11:21:47.397517 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/e4327280-b4bf-4f80-b06b-b21c80941f81-service-ca\") pod \"console-7f7c969bc5-mnnn6\" (UID: \"e4327280-b4bf-4f80-b06b-b21c80941f81\") " pod="openshift-console/console-7f7c969bc5-mnnn6" Nov 28 11:21:47 crc kubenswrapper[4923]: I1128 11:21:47.397892 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/e4327280-b4bf-4f80-b06b-b21c80941f81-trusted-ca-bundle\") pod \"console-7f7c969bc5-mnnn6\" (UID: \"e4327280-b4bf-4f80-b06b-b21c80941f81\") " pod="openshift-console/console-7f7c969bc5-mnnn6" Nov 28 11:21:47 crc kubenswrapper[4923]: I1128 11:21:47.400356 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/e4327280-b4bf-4f80-b06b-b21c80941f81-console-config\") pod \"console-7f7c969bc5-mnnn6\" (UID: \"e4327280-b4bf-4f80-b06b-b21c80941f81\") " pod="openshift-console/console-7f7c969bc5-mnnn6" Nov 28 11:21:47 crc kubenswrapper[4923]: I1128 11:21:47.404415 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/e4327280-b4bf-4f80-b06b-b21c80941f81-console-oauth-config\") pod \"console-7f7c969bc5-mnnn6\" (UID: \"e4327280-b4bf-4f80-b06b-b21c80941f81\") " pod="openshift-console/console-7f7c969bc5-mnnn6" Nov 28 11:21:47 crc kubenswrapper[4923]: I1128 11:21:47.408490 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/e4327280-b4bf-4f80-b06b-b21c80941f81-console-serving-cert\") pod \"console-7f7c969bc5-mnnn6\" (UID: \"e4327280-b4bf-4f80-b06b-b21c80941f81\") " pod="openshift-console/console-7f7c969bc5-mnnn6" Nov 28 11:21:47 crc kubenswrapper[4923]: I1128 11:21:47.414757 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9627h\" (UniqueName: \"kubernetes.io/projected/e4327280-b4bf-4f80-b06b-b21c80941f81-kube-api-access-9627h\") pod \"console-7f7c969bc5-mnnn6\" (UID: \"e4327280-b4bf-4f80-b06b-b21c80941f81\") " pod="openshift-console/console-7f7c969bc5-mnnn6" Nov 28 11:21:47 crc kubenswrapper[4923]: I1128 11:21:47.511055 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-7f946cbc9-pmjlv"] Nov 28 11:21:47 crc kubenswrapper[4923]: I1128 11:21:47.578177 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-7f7c969bc5-mnnn6" Nov 28 11:21:47 crc kubenswrapper[4923]: I1128 11:21:47.601612 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-5f6d4c5ccb-pns7s"] Nov 28 11:21:47 crc kubenswrapper[4923]: I1128 11:21:47.649430 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7fbb5f6569-btvd5"] Nov 28 11:21:47 crc kubenswrapper[4923]: I1128 11:21:47.769230 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-7f7c969bc5-mnnn6"] Nov 28 11:21:47 crc kubenswrapper[4923]: W1128 11:21:47.775481 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode4327280_b4bf_4f80_b06b_b21c80941f81.slice/crio-20a9989f9cd8e81f3d6ec088a09f8205db025658a53a1eeb045a6f52822d5c54 WatchSource:0}: Error finding container 20a9989f9cd8e81f3d6ec088a09f8205db025658a53a1eeb045a6f52822d5c54: Status 404 returned error can't find the container with id 20a9989f9cd8e81f3d6ec088a09f8205db025658a53a1eeb045a6f52822d5c54 Nov 28 11:21:48 crc kubenswrapper[4923]: I1128 11:21:48.351968 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-btvd5" event={"ID":"66e9ab76-5eff-4f56-8ff0-aabc981b3b0e","Type":"ContainerStarted","Data":"233c0703b70671059728a4f3fff2dac461f20c130b797edda86ea7e68707934f"} Nov 28 11:21:48 crc kubenswrapper[4923]: I1128 11:21:48.354071 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-7f7c969bc5-mnnn6" event={"ID":"e4327280-b4bf-4f80-b06b-b21c80941f81","Type":"ContainerStarted","Data":"22f0a121f2afcd302f436065b4f96cd26ac4bc90c83b749941b163bc5852d9ad"} Nov 28 11:21:48 crc kubenswrapper[4923]: I1128 11:21:48.354121 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-7f7c969bc5-mnnn6" event={"ID":"e4327280-b4bf-4f80-b06b-b21c80941f81","Type":"ContainerStarted","Data":"20a9989f9cd8e81f3d6ec088a09f8205db025658a53a1eeb045a6f52822d5c54"} Nov 28 11:21:48 crc kubenswrapper[4923]: I1128 11:21:48.356114 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-pns7s" event={"ID":"b6f09dc3-c7f4-40fe-862e-badef31718a6","Type":"ContainerStarted","Data":"6f0aa83ed9e731bc27a9d59b0d963275cd6ff495da0b5b93361efc7d79cd792e"} Nov 28 11:21:48 crc kubenswrapper[4923]: I1128 11:21:48.357457 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-pmjlv" event={"ID":"772d8536-f37b-434f-a5d3-d569c2079591","Type":"ContainerStarted","Data":"7693c3760be1c8ad3994d392adbaa6f9e2ff5b134afdc43869369e9b6c43da8d"} Nov 28 11:21:48 crc kubenswrapper[4923]: I1128 11:21:48.385091 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-7f7c969bc5-mnnn6" podStartSLOduration=1.385073181 podStartE2EDuration="1.385073181s" podCreationTimestamp="2025-11-28 11:21:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:21:48.382454377 +0000 UTC m=+787.511138617" watchObservedRunningTime="2025-11-28 11:21:48.385073181 +0000 UTC m=+787.513757421" Nov 28 11:21:49 crc kubenswrapper[4923]: I1128 11:21:49.308301 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-7cmr5" Nov 28 11:21:49 crc 
kubenswrapper[4923]: I1128 11:21:49.352052 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-7cmr5" Nov 28 11:21:49 crc kubenswrapper[4923]: I1128 11:21:49.539517 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-7cmr5"] Nov 28 11:21:50 crc kubenswrapper[4923]: I1128 11:21:50.378014 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-7cmr5" podUID="87586000-11ac-48c2-bee1-b8bb3c840b0e" containerName="registry-server" containerID="cri-o://9d390380b03f4f3817369ee3320c901bd6d41aaf4254cbc91ae2449cac53a8b9" gracePeriod=2 Nov 28 11:21:50 crc kubenswrapper[4923]: I1128 11:21:50.961364 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-7cmr5" Nov 28 11:21:51 crc kubenswrapper[4923]: I1128 11:21:51.058973 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/87586000-11ac-48c2-bee1-b8bb3c840b0e-utilities\") pod \"87586000-11ac-48c2-bee1-b8bb3c840b0e\" (UID: \"87586000-11ac-48c2-bee1-b8bb3c840b0e\") " Nov 28 11:21:51 crc kubenswrapper[4923]: I1128 11:21:51.059089 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/87586000-11ac-48c2-bee1-b8bb3c840b0e-catalog-content\") pod \"87586000-11ac-48c2-bee1-b8bb3c840b0e\" (UID: \"87586000-11ac-48c2-bee1-b8bb3c840b0e\") " Nov 28 11:21:51 crc kubenswrapper[4923]: I1128 11:21:51.059157 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8t8km\" (UniqueName: \"kubernetes.io/projected/87586000-11ac-48c2-bee1-b8bb3c840b0e-kube-api-access-8t8km\") pod \"87586000-11ac-48c2-bee1-b8bb3c840b0e\" (UID: \"87586000-11ac-48c2-bee1-b8bb3c840b0e\") " Nov 28 11:21:51 crc kubenswrapper[4923]: I1128 11:21:51.060409 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/87586000-11ac-48c2-bee1-b8bb3c840b0e-utilities" (OuterVolumeSpecName: "utilities") pod "87586000-11ac-48c2-bee1-b8bb3c840b0e" (UID: "87586000-11ac-48c2-bee1-b8bb3c840b0e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:21:51 crc kubenswrapper[4923]: I1128 11:21:51.063673 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87586000-11ac-48c2-bee1-b8bb3c840b0e-kube-api-access-8t8km" (OuterVolumeSpecName: "kube-api-access-8t8km") pod "87586000-11ac-48c2-bee1-b8bb3c840b0e" (UID: "87586000-11ac-48c2-bee1-b8bb3c840b0e"). InnerVolumeSpecName "kube-api-access-8t8km". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:21:51 crc kubenswrapper[4923]: I1128 11:21:51.160244 4923 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/87586000-11ac-48c2-bee1-b8bb3c840b0e-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 11:21:51 crc kubenswrapper[4923]: I1128 11:21:51.160465 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8t8km\" (UniqueName: \"kubernetes.io/projected/87586000-11ac-48c2-bee1-b8bb3c840b0e-kube-api-access-8t8km\") on node \"crc\" DevicePath \"\"" Nov 28 11:21:51 crc kubenswrapper[4923]: I1128 11:21:51.185086 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/87586000-11ac-48c2-bee1-b8bb3c840b0e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "87586000-11ac-48c2-bee1-b8bb3c840b0e" (UID: "87586000-11ac-48c2-bee1-b8bb3c840b0e"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:21:51 crc kubenswrapper[4923]: I1128 11:21:51.262472 4923 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/87586000-11ac-48c2-bee1-b8bb3c840b0e-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 11:21:51 crc kubenswrapper[4923]: I1128 11:21:51.402815 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-btvd5" event={"ID":"66e9ab76-5eff-4f56-8ff0-aabc981b3b0e","Type":"ContainerStarted","Data":"288420d847e7bdb8fb506aecafe3468bb5e5daf33cd5d6600c634bcfcc5450f6"} Nov 28 11:21:51 crc kubenswrapper[4923]: I1128 11:21:51.408680 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-pns7s" event={"ID":"b6f09dc3-c7f4-40fe-862e-badef31718a6","Type":"ContainerStarted","Data":"16bc7384bd24112832084a099008fabf7a99582d11a74e2c21e0ae6d1b824a36"} Nov 28 11:21:51 crc kubenswrapper[4923]: I1128 11:21:51.409253 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-pns7s" Nov 28 11:21:51 crc kubenswrapper[4923]: I1128 11:21:51.411247 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-8rfxv" event={"ID":"860a9fa6-e760-4c45-a051-4fcf8b6c3fc4","Type":"ContainerStarted","Data":"5e2118816eb3bdfe0e64f3f5cb7495a762d6da8bf581b766e81dae76e5a579a2"} Nov 28 11:21:51 crc kubenswrapper[4923]: I1128 11:21:51.411582 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-handler-8rfxv" Nov 28 11:21:51 crc kubenswrapper[4923]: I1128 11:21:51.413043 4923 generic.go:334] "Generic (PLEG): container finished" podID="87586000-11ac-48c2-bee1-b8bb3c840b0e" containerID="9d390380b03f4f3817369ee3320c901bd6d41aaf4254cbc91ae2449cac53a8b9" exitCode=0 Nov 28 11:21:51 crc kubenswrapper[4923]: I1128 11:21:51.413102 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-7cmr5" Nov 28 11:21:51 crc kubenswrapper[4923]: I1128 11:21:51.413119 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7cmr5" event={"ID":"87586000-11ac-48c2-bee1-b8bb3c840b0e","Type":"ContainerDied","Data":"9d390380b03f4f3817369ee3320c901bd6d41aaf4254cbc91ae2449cac53a8b9"} Nov 28 11:21:51 crc kubenswrapper[4923]: I1128 11:21:51.413673 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7cmr5" event={"ID":"87586000-11ac-48c2-bee1-b8bb3c840b0e","Type":"ContainerDied","Data":"c26183dae36e36929caeb8bd702808304948030868f8f07d5914ca16fed113fb"} Nov 28 11:21:51 crc kubenswrapper[4923]: I1128 11:21:51.413715 4923 scope.go:117] "RemoveContainer" containerID="9d390380b03f4f3817369ee3320c901bd6d41aaf4254cbc91ae2449cac53a8b9" Nov 28 11:21:51 crc kubenswrapper[4923]: I1128 11:21:51.430671 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-btvd5" podStartSLOduration=1.296374692 podStartE2EDuration="4.430649873s" podCreationTimestamp="2025-11-28 11:21:47 +0000 UTC" firstStartedPulling="2025-11-28 11:21:47.658876448 +0000 UTC m=+786.787560658" lastFinishedPulling="2025-11-28 11:21:50.793151619 +0000 UTC m=+789.921835839" observedRunningTime="2025-11-28 11:21:51.425829796 +0000 UTC m=+790.554514046" watchObservedRunningTime="2025-11-28 11:21:51.430649873 +0000 UTC m=+790.559334083" Nov 28 11:21:51 crc kubenswrapper[4923]: I1128 11:21:51.457484 4923 scope.go:117] "RemoveContainer" containerID="b304447d367325c0290e2aa69e8a9b377ac66b8eb440967ddb14049054827b63" Nov 28 11:21:51 crc kubenswrapper[4923]: I1128 11:21:51.460119 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-pns7s" podStartSLOduration=2.27048056 podStartE2EDuration="5.460107946s" podCreationTimestamp="2025-11-28 11:21:46 +0000 UTC" firstStartedPulling="2025-11-28 11:21:47.629120377 +0000 UTC m=+786.757804587" lastFinishedPulling="2025-11-28 11:21:50.818747753 +0000 UTC m=+789.947431973" observedRunningTime="2025-11-28 11:21:51.458914852 +0000 UTC m=+790.587599072" watchObservedRunningTime="2025-11-28 11:21:51.460107946 +0000 UTC m=+790.588792166" Nov 28 11:21:51 crc kubenswrapper[4923]: I1128 11:21:51.489532 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-handler-8rfxv" podStartSLOduration=2.007043408 podStartE2EDuration="5.487912693s" podCreationTimestamp="2025-11-28 11:21:46 +0000 UTC" firstStartedPulling="2025-11-28 11:21:47.3131983 +0000 UTC m=+786.441882510" lastFinishedPulling="2025-11-28 11:21:50.794067585 +0000 UTC m=+789.922751795" observedRunningTime="2025-11-28 11:21:51.477517909 +0000 UTC m=+790.606202159" watchObservedRunningTime="2025-11-28 11:21:51.487912693 +0000 UTC m=+790.616596943" Nov 28 11:21:51 crc kubenswrapper[4923]: I1128 11:21:51.492848 4923 scope.go:117] "RemoveContainer" containerID="e1e0e50ca90237f91e8dd73c787155e16f1a76474955e578902633550d24afb5" Nov 28 11:21:51 crc kubenswrapper[4923]: I1128 11:21:51.512897 4923 scope.go:117] "RemoveContainer" containerID="9d390380b03f4f3817369ee3320c901bd6d41aaf4254cbc91ae2449cac53a8b9" Nov 28 11:21:51 crc kubenswrapper[4923]: E1128 11:21:51.513306 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"9d390380b03f4f3817369ee3320c901bd6d41aaf4254cbc91ae2449cac53a8b9\": container with ID starting with 9d390380b03f4f3817369ee3320c901bd6d41aaf4254cbc91ae2449cac53a8b9 not found: ID does not exist" containerID="9d390380b03f4f3817369ee3320c901bd6d41aaf4254cbc91ae2449cac53a8b9" Nov 28 11:21:51 crc kubenswrapper[4923]: I1128 11:21:51.513357 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9d390380b03f4f3817369ee3320c901bd6d41aaf4254cbc91ae2449cac53a8b9"} err="failed to get container status \"9d390380b03f4f3817369ee3320c901bd6d41aaf4254cbc91ae2449cac53a8b9\": rpc error: code = NotFound desc = could not find container \"9d390380b03f4f3817369ee3320c901bd6d41aaf4254cbc91ae2449cac53a8b9\": container with ID starting with 9d390380b03f4f3817369ee3320c901bd6d41aaf4254cbc91ae2449cac53a8b9 not found: ID does not exist" Nov 28 11:21:51 crc kubenswrapper[4923]: I1128 11:21:51.513389 4923 scope.go:117] "RemoveContainer" containerID="b304447d367325c0290e2aa69e8a9b377ac66b8eb440967ddb14049054827b63" Nov 28 11:21:51 crc kubenswrapper[4923]: E1128 11:21:51.513902 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b304447d367325c0290e2aa69e8a9b377ac66b8eb440967ddb14049054827b63\": container with ID starting with b304447d367325c0290e2aa69e8a9b377ac66b8eb440967ddb14049054827b63 not found: ID does not exist" containerID="b304447d367325c0290e2aa69e8a9b377ac66b8eb440967ddb14049054827b63" Nov 28 11:21:51 crc kubenswrapper[4923]: I1128 11:21:51.513970 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b304447d367325c0290e2aa69e8a9b377ac66b8eb440967ddb14049054827b63"} err="failed to get container status \"b304447d367325c0290e2aa69e8a9b377ac66b8eb440967ddb14049054827b63\": rpc error: code = NotFound desc = could not find container \"b304447d367325c0290e2aa69e8a9b377ac66b8eb440967ddb14049054827b63\": container with ID starting with b304447d367325c0290e2aa69e8a9b377ac66b8eb440967ddb14049054827b63 not found: ID does not exist" Nov 28 11:21:51 crc kubenswrapper[4923]: I1128 11:21:51.514000 4923 scope.go:117] "RemoveContainer" containerID="e1e0e50ca90237f91e8dd73c787155e16f1a76474955e578902633550d24afb5" Nov 28 11:21:51 crc kubenswrapper[4923]: E1128 11:21:51.514444 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e1e0e50ca90237f91e8dd73c787155e16f1a76474955e578902633550d24afb5\": container with ID starting with e1e0e50ca90237f91e8dd73c787155e16f1a76474955e578902633550d24afb5 not found: ID does not exist" containerID="e1e0e50ca90237f91e8dd73c787155e16f1a76474955e578902633550d24afb5" Nov 28 11:21:51 crc kubenswrapper[4923]: I1128 11:21:51.514471 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e1e0e50ca90237f91e8dd73c787155e16f1a76474955e578902633550d24afb5"} err="failed to get container status \"e1e0e50ca90237f91e8dd73c787155e16f1a76474955e578902633550d24afb5\": rpc error: code = NotFound desc = could not find container \"e1e0e50ca90237f91e8dd73c787155e16f1a76474955e578902633550d24afb5\": container with ID starting with e1e0e50ca90237f91e8dd73c787155e16f1a76474955e578902633550d24afb5 not found: ID does not exist" Nov 28 11:21:51 crc kubenswrapper[4923]: I1128 11:21:51.516309 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-7cmr5"] Nov 28 11:21:51 crc kubenswrapper[4923]: 
I1128 11:21:51.522673 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-7cmr5"] Nov 28 11:21:53 crc kubenswrapper[4923]: I1128 11:21:53.176223 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87586000-11ac-48c2-bee1-b8bb3c840b0e" path="/var/lib/kubelet/pods/87586000-11ac-48c2-bee1-b8bb3c840b0e/volumes" Nov 28 11:21:53 crc kubenswrapper[4923]: I1128 11:21:53.429492 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-pmjlv" event={"ID":"772d8536-f37b-434f-a5d3-d569c2079591","Type":"ContainerStarted","Data":"67c03da9dcd9ce3a28b163e2290fa9b32f8eba520e816b738201a26d1cb934f9"} Nov 28 11:21:56 crc kubenswrapper[4923]: I1128 11:21:56.454531 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-pmjlv" event={"ID":"772d8536-f37b-434f-a5d3-d569c2079591","Type":"ContainerStarted","Data":"d3bb7e2dbbb5ff13e724595f028f96f6e3aa16aab5e2fbf160b41e59210ff3bf"} Nov 28 11:21:56 crc kubenswrapper[4923]: I1128 11:21:56.477743 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-pmjlv" podStartSLOduration=1.822700281 podStartE2EDuration="10.477718561s" podCreationTimestamp="2025-11-28 11:21:46 +0000 UTC" firstStartedPulling="2025-11-28 11:21:47.571102215 +0000 UTC m=+786.699786425" lastFinishedPulling="2025-11-28 11:21:56.226120465 +0000 UTC m=+795.354804705" observedRunningTime="2025-11-28 11:21:56.472411211 +0000 UTC m=+795.601095461" watchObservedRunningTime="2025-11-28 11:21:56.477718561 +0000 UTC m=+795.606402811" Nov 28 11:21:57 crc kubenswrapper[4923]: I1128 11:21:57.263670 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-handler-8rfxv" Nov 28 11:21:57 crc kubenswrapper[4923]: I1128 11:21:57.579425 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-7f7c969bc5-mnnn6" Nov 28 11:21:57 crc kubenswrapper[4923]: I1128 11:21:57.579500 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-7f7c969bc5-mnnn6" Nov 28 11:21:57 crc kubenswrapper[4923]: I1128 11:21:57.588896 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-7f7c969bc5-mnnn6" Nov 28 11:21:58 crc kubenswrapper[4923]: I1128 11:21:58.475140 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-7f7c969bc5-mnnn6" Nov 28 11:21:58 crc kubenswrapper[4923]: I1128 11:21:58.550803 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-2vsdg"] Nov 28 11:22:07 crc kubenswrapper[4923]: I1128 11:22:07.236717 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-pns7s" Nov 28 11:22:21 crc kubenswrapper[4923]: I1128 11:22:21.201986 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83cphz8"] Nov 28 11:22:21 crc kubenswrapper[4923]: E1128 11:22:21.202598 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="87586000-11ac-48c2-bee1-b8bb3c840b0e" containerName="extract-utilities" Nov 28 11:22:21 crc kubenswrapper[4923]: I1128 11:22:21.202611 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="87586000-11ac-48c2-bee1-b8bb3c840b0e" 
containerName="extract-utilities" Nov 28 11:22:21 crc kubenswrapper[4923]: E1128 11:22:21.202627 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="87586000-11ac-48c2-bee1-b8bb3c840b0e" containerName="extract-content" Nov 28 11:22:21 crc kubenswrapper[4923]: I1128 11:22:21.202633 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="87586000-11ac-48c2-bee1-b8bb3c840b0e" containerName="extract-content" Nov 28 11:22:21 crc kubenswrapper[4923]: E1128 11:22:21.202640 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="87586000-11ac-48c2-bee1-b8bb3c840b0e" containerName="registry-server" Nov 28 11:22:21 crc kubenswrapper[4923]: I1128 11:22:21.202646 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="87586000-11ac-48c2-bee1-b8bb3c840b0e" containerName="registry-server" Nov 28 11:22:21 crc kubenswrapper[4923]: I1128 11:22:21.202729 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="87586000-11ac-48c2-bee1-b8bb3c840b0e" containerName="registry-server" Nov 28 11:22:21 crc kubenswrapper[4923]: I1128 11:22:21.203565 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83cphz8" Nov 28 11:22:21 crc kubenswrapper[4923]: I1128 11:22:21.205770 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 28 11:22:21 crc kubenswrapper[4923]: I1128 11:22:21.217583 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83cphz8"] Nov 28 11:22:21 crc kubenswrapper[4923]: I1128 11:22:21.331465 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a23f6f20-cf93-4a58-868f-42242f0f1e17-util\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83cphz8\" (UID: \"a23f6f20-cf93-4a58-868f-42242f0f1e17\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83cphz8" Nov 28 11:22:21 crc kubenswrapper[4923]: I1128 11:22:21.331744 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a23f6f20-cf93-4a58-868f-42242f0f1e17-bundle\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83cphz8\" (UID: \"a23f6f20-cf93-4a58-868f-42242f0f1e17\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83cphz8" Nov 28 11:22:21 crc kubenswrapper[4923]: I1128 11:22:21.331822 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d64nx\" (UniqueName: \"kubernetes.io/projected/a23f6f20-cf93-4a58-868f-42242f0f1e17-kube-api-access-d64nx\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83cphz8\" (UID: \"a23f6f20-cf93-4a58-868f-42242f0f1e17\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83cphz8" Nov 28 11:22:21 crc kubenswrapper[4923]: I1128 11:22:21.433462 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a23f6f20-cf93-4a58-868f-42242f0f1e17-util\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83cphz8\" (UID: \"a23f6f20-cf93-4a58-868f-42242f0f1e17\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83cphz8" 
Nov 28 11:22:21 crc kubenswrapper[4923]: I1128 11:22:21.433548 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a23f6f20-cf93-4a58-868f-42242f0f1e17-bundle\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83cphz8\" (UID: \"a23f6f20-cf93-4a58-868f-42242f0f1e17\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83cphz8" Nov 28 11:22:21 crc kubenswrapper[4923]: I1128 11:22:21.433592 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d64nx\" (UniqueName: \"kubernetes.io/projected/a23f6f20-cf93-4a58-868f-42242f0f1e17-kube-api-access-d64nx\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83cphz8\" (UID: \"a23f6f20-cf93-4a58-868f-42242f0f1e17\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83cphz8" Nov 28 11:22:21 crc kubenswrapper[4923]: I1128 11:22:21.433979 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a23f6f20-cf93-4a58-868f-42242f0f1e17-util\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83cphz8\" (UID: \"a23f6f20-cf93-4a58-868f-42242f0f1e17\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83cphz8" Nov 28 11:22:21 crc kubenswrapper[4923]: I1128 11:22:21.433985 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a23f6f20-cf93-4a58-868f-42242f0f1e17-bundle\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83cphz8\" (UID: \"a23f6f20-cf93-4a58-868f-42242f0f1e17\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83cphz8" Nov 28 11:22:21 crc kubenswrapper[4923]: I1128 11:22:21.462131 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d64nx\" (UniqueName: \"kubernetes.io/projected/a23f6f20-cf93-4a58-868f-42242f0f1e17-kube-api-access-d64nx\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83cphz8\" (UID: \"a23f6f20-cf93-4a58-868f-42242f0f1e17\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83cphz8" Nov 28 11:22:21 crc kubenswrapper[4923]: I1128 11:22:21.519988 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83cphz8" Nov 28 11:22:21 crc kubenswrapper[4923]: I1128 11:22:21.783823 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83cphz8"] Nov 28 11:22:21 crc kubenswrapper[4923]: W1128 11:22:21.792895 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda23f6f20_cf93_4a58_868f_42242f0f1e17.slice/crio-8062ffa4949fc9d63933fbd22a359fcb266711b9590d40ba35151debeabf3f7e WatchSource:0}: Error finding container 8062ffa4949fc9d63933fbd22a359fcb266711b9590d40ba35151debeabf3f7e: Status 404 returned error can't find the container with id 8062ffa4949fc9d63933fbd22a359fcb266711b9590d40ba35151debeabf3f7e Nov 28 11:22:22 crc kubenswrapper[4923]: I1128 11:22:22.642369 4923 generic.go:334] "Generic (PLEG): container finished" podID="a23f6f20-cf93-4a58-868f-42242f0f1e17" containerID="05c5a6882fb74491a009907f19a05945fb37693f008852fc472050e83000f495" exitCode=0 Nov 28 11:22:22 crc kubenswrapper[4923]: I1128 11:22:22.642442 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83cphz8" event={"ID":"a23f6f20-cf93-4a58-868f-42242f0f1e17","Type":"ContainerDied","Data":"05c5a6882fb74491a009907f19a05945fb37693f008852fc472050e83000f495"} Nov 28 11:22:22 crc kubenswrapper[4923]: I1128 11:22:22.642880 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83cphz8" event={"ID":"a23f6f20-cf93-4a58-868f-42242f0f1e17","Type":"ContainerStarted","Data":"8062ffa4949fc9d63933fbd22a359fcb266711b9590d40ba35151debeabf3f7e"} Nov 28 11:22:23 crc kubenswrapper[4923]: I1128 11:22:23.625483 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-f9d7485db-2vsdg" podUID="aa97fc63-7e09-4217-9fb9-78fca4703f04" containerName="console" containerID="cri-o://f8eb92c4f8e5e324e7afdaa03d4ca4a6396aada381ec16ee9fb2b01c24c67b62" gracePeriod=15 Nov 28 11:22:24 crc kubenswrapper[4923]: I1128 11:22:24.065296 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-2vsdg_aa97fc63-7e09-4217-9fb9-78fca4703f04/console/0.log" Nov 28 11:22:24 crc kubenswrapper[4923]: I1128 11:22:24.065589 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-2vsdg" Nov 28 11:22:24 crc kubenswrapper[4923]: I1128 11:22:24.180988 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/aa97fc63-7e09-4217-9fb9-78fca4703f04-console-serving-cert\") pod \"aa97fc63-7e09-4217-9fb9-78fca4703f04\" (UID: \"aa97fc63-7e09-4217-9fb9-78fca4703f04\") " Nov 28 11:22:24 crc kubenswrapper[4923]: I1128 11:22:24.181116 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/aa97fc63-7e09-4217-9fb9-78fca4703f04-service-ca\") pod \"aa97fc63-7e09-4217-9fb9-78fca4703f04\" (UID: \"aa97fc63-7e09-4217-9fb9-78fca4703f04\") " Nov 28 11:22:24 crc kubenswrapper[4923]: I1128 11:22:24.181166 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wwmwf\" (UniqueName: \"kubernetes.io/projected/aa97fc63-7e09-4217-9fb9-78fca4703f04-kube-api-access-wwmwf\") pod \"aa97fc63-7e09-4217-9fb9-78fca4703f04\" (UID: \"aa97fc63-7e09-4217-9fb9-78fca4703f04\") " Nov 28 11:22:24 crc kubenswrapper[4923]: I1128 11:22:24.181229 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/aa97fc63-7e09-4217-9fb9-78fca4703f04-console-config\") pod \"aa97fc63-7e09-4217-9fb9-78fca4703f04\" (UID: \"aa97fc63-7e09-4217-9fb9-78fca4703f04\") " Nov 28 11:22:24 crc kubenswrapper[4923]: I1128 11:22:24.181271 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/aa97fc63-7e09-4217-9fb9-78fca4703f04-oauth-serving-cert\") pod \"aa97fc63-7e09-4217-9fb9-78fca4703f04\" (UID: \"aa97fc63-7e09-4217-9fb9-78fca4703f04\") " Nov 28 11:22:24 crc kubenswrapper[4923]: I1128 11:22:24.181341 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/aa97fc63-7e09-4217-9fb9-78fca4703f04-console-oauth-config\") pod \"aa97fc63-7e09-4217-9fb9-78fca4703f04\" (UID: \"aa97fc63-7e09-4217-9fb9-78fca4703f04\") " Nov 28 11:22:24 crc kubenswrapper[4923]: I1128 11:22:24.181537 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/aa97fc63-7e09-4217-9fb9-78fca4703f04-trusted-ca-bundle\") pod \"aa97fc63-7e09-4217-9fb9-78fca4703f04\" (UID: \"aa97fc63-7e09-4217-9fb9-78fca4703f04\") " Nov 28 11:22:24 crc kubenswrapper[4923]: I1128 11:22:24.184378 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aa97fc63-7e09-4217-9fb9-78fca4703f04-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "aa97fc63-7e09-4217-9fb9-78fca4703f04" (UID: "aa97fc63-7e09-4217-9fb9-78fca4703f04"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:22:24 crc kubenswrapper[4923]: I1128 11:22:24.184495 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aa97fc63-7e09-4217-9fb9-78fca4703f04-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "aa97fc63-7e09-4217-9fb9-78fca4703f04" (UID: "aa97fc63-7e09-4217-9fb9-78fca4703f04"). InnerVolumeSpecName "oauth-serving-cert". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:22:24 crc kubenswrapper[4923]: I1128 11:22:24.185340 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aa97fc63-7e09-4217-9fb9-78fca4703f04-service-ca" (OuterVolumeSpecName: "service-ca") pod "aa97fc63-7e09-4217-9fb9-78fca4703f04" (UID: "aa97fc63-7e09-4217-9fb9-78fca4703f04"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:22:24 crc kubenswrapper[4923]: I1128 11:22:24.185653 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aa97fc63-7e09-4217-9fb9-78fca4703f04-console-config" (OuterVolumeSpecName: "console-config") pod "aa97fc63-7e09-4217-9fb9-78fca4703f04" (UID: "aa97fc63-7e09-4217-9fb9-78fca4703f04"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:22:24 crc kubenswrapper[4923]: I1128 11:22:24.192363 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aa97fc63-7e09-4217-9fb9-78fca4703f04-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "aa97fc63-7e09-4217-9fb9-78fca4703f04" (UID: "aa97fc63-7e09-4217-9fb9-78fca4703f04"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:22:24 crc kubenswrapper[4923]: I1128 11:22:24.194236 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aa97fc63-7e09-4217-9fb9-78fca4703f04-kube-api-access-wwmwf" (OuterVolumeSpecName: "kube-api-access-wwmwf") pod "aa97fc63-7e09-4217-9fb9-78fca4703f04" (UID: "aa97fc63-7e09-4217-9fb9-78fca4703f04"). InnerVolumeSpecName "kube-api-access-wwmwf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:22:24 crc kubenswrapper[4923]: I1128 11:22:24.207222 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aa97fc63-7e09-4217-9fb9-78fca4703f04-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "aa97fc63-7e09-4217-9fb9-78fca4703f04" (UID: "aa97fc63-7e09-4217-9fb9-78fca4703f04"). InnerVolumeSpecName "console-oauth-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:22:24 crc kubenswrapper[4923]: I1128 11:22:24.283672 4923 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/aa97fc63-7e09-4217-9fb9-78fca4703f04-console-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 11:22:24 crc kubenswrapper[4923]: I1128 11:22:24.283728 4923 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/aa97fc63-7e09-4217-9fb9-78fca4703f04-service-ca\") on node \"crc\" DevicePath \"\"" Nov 28 11:22:24 crc kubenswrapper[4923]: I1128 11:22:24.283753 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wwmwf\" (UniqueName: \"kubernetes.io/projected/aa97fc63-7e09-4217-9fb9-78fca4703f04-kube-api-access-wwmwf\") on node \"crc\" DevicePath \"\"" Nov 28 11:22:24 crc kubenswrapper[4923]: I1128 11:22:24.283780 4923 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/aa97fc63-7e09-4217-9fb9-78fca4703f04-console-config\") on node \"crc\" DevicePath \"\"" Nov 28 11:22:24 crc kubenswrapper[4923]: I1128 11:22:24.283804 4923 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/aa97fc63-7e09-4217-9fb9-78fca4703f04-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 11:22:24 crc kubenswrapper[4923]: I1128 11:22:24.283825 4923 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/aa97fc63-7e09-4217-9fb9-78fca4703f04-console-oauth-config\") on node \"crc\" DevicePath \"\"" Nov 28 11:22:24 crc kubenswrapper[4923]: I1128 11:22:24.283848 4923 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/aa97fc63-7e09-4217-9fb9-78fca4703f04-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 11:22:24 crc kubenswrapper[4923]: I1128 11:22:24.661177 4923 generic.go:334] "Generic (PLEG): container finished" podID="a23f6f20-cf93-4a58-868f-42242f0f1e17" containerID="beb4ba13c56ba679744770577e8e65188b161c326d998c614c6b18e597aadb86" exitCode=0 Nov 28 11:22:24 crc kubenswrapper[4923]: I1128 11:22:24.661274 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83cphz8" event={"ID":"a23f6f20-cf93-4a58-868f-42242f0f1e17","Type":"ContainerDied","Data":"beb4ba13c56ba679744770577e8e65188b161c326d998c614c6b18e597aadb86"} Nov 28 11:22:24 crc kubenswrapper[4923]: I1128 11:22:24.666063 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-2vsdg_aa97fc63-7e09-4217-9fb9-78fca4703f04/console/0.log" Nov 28 11:22:24 crc kubenswrapper[4923]: I1128 11:22:24.666375 4923 generic.go:334] "Generic (PLEG): container finished" podID="aa97fc63-7e09-4217-9fb9-78fca4703f04" containerID="f8eb92c4f8e5e324e7afdaa03d4ca4a6396aada381ec16ee9fb2b01c24c67b62" exitCode=2 Nov 28 11:22:24 crc kubenswrapper[4923]: I1128 11:22:24.666447 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-2vsdg" event={"ID":"aa97fc63-7e09-4217-9fb9-78fca4703f04","Type":"ContainerDied","Data":"f8eb92c4f8e5e324e7afdaa03d4ca4a6396aada381ec16ee9fb2b01c24c67b62"} Nov 28 11:22:24 crc kubenswrapper[4923]: I1128 11:22:24.666495 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-2vsdg" Nov 28 11:22:24 crc kubenswrapper[4923]: I1128 11:22:24.666593 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-2vsdg" event={"ID":"aa97fc63-7e09-4217-9fb9-78fca4703f04","Type":"ContainerDied","Data":"075d31152141784f0f1c4ea6ee5114d374239a290609a5f8e4e57c4b14ab6981"} Nov 28 11:22:24 crc kubenswrapper[4923]: I1128 11:22:24.666736 4923 scope.go:117] "RemoveContainer" containerID="f8eb92c4f8e5e324e7afdaa03d4ca4a6396aada381ec16ee9fb2b01c24c67b62" Nov 28 11:22:24 crc kubenswrapper[4923]: I1128 11:22:24.718470 4923 scope.go:117] "RemoveContainer" containerID="f8eb92c4f8e5e324e7afdaa03d4ca4a6396aada381ec16ee9fb2b01c24c67b62" Nov 28 11:22:24 crc kubenswrapper[4923]: E1128 11:22:24.719107 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f8eb92c4f8e5e324e7afdaa03d4ca4a6396aada381ec16ee9fb2b01c24c67b62\": container with ID starting with f8eb92c4f8e5e324e7afdaa03d4ca4a6396aada381ec16ee9fb2b01c24c67b62 not found: ID does not exist" containerID="f8eb92c4f8e5e324e7afdaa03d4ca4a6396aada381ec16ee9fb2b01c24c67b62" Nov 28 11:22:24 crc kubenswrapper[4923]: I1128 11:22:24.719283 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f8eb92c4f8e5e324e7afdaa03d4ca4a6396aada381ec16ee9fb2b01c24c67b62"} err="failed to get container status \"f8eb92c4f8e5e324e7afdaa03d4ca4a6396aada381ec16ee9fb2b01c24c67b62\": rpc error: code = NotFound desc = could not find container \"f8eb92c4f8e5e324e7afdaa03d4ca4a6396aada381ec16ee9fb2b01c24c67b62\": container with ID starting with f8eb92c4f8e5e324e7afdaa03d4ca4a6396aada381ec16ee9fb2b01c24c67b62 not found: ID does not exist" Nov 28 11:22:24 crc kubenswrapper[4923]: I1128 11:22:24.724798 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-2vsdg"] Nov 28 11:22:24 crc kubenswrapper[4923]: I1128 11:22:24.735307 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-f9d7485db-2vsdg"] Nov 28 11:22:25 crc kubenswrapper[4923]: I1128 11:22:25.181921 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="aa97fc63-7e09-4217-9fb9-78fca4703f04" path="/var/lib/kubelet/pods/aa97fc63-7e09-4217-9fb9-78fca4703f04/volumes" Nov 28 11:22:25 crc kubenswrapper[4923]: I1128 11:22:25.681031 4923 generic.go:334] "Generic (PLEG): container finished" podID="a23f6f20-cf93-4a58-868f-42242f0f1e17" containerID="c026708f603a14b0834ef04f5f3e6c7d1eacb57afe14d2d4a7f29d5acdd4cf06" exitCode=0 Nov 28 11:22:25 crc kubenswrapper[4923]: I1128 11:22:25.681131 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83cphz8" event={"ID":"a23f6f20-cf93-4a58-868f-42242f0f1e17","Type":"ContainerDied","Data":"c026708f603a14b0834ef04f5f3e6c7d1eacb57afe14d2d4a7f29d5acdd4cf06"} Nov 28 11:22:26 crc kubenswrapper[4923]: I1128 11:22:26.937472 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83cphz8" Nov 28 11:22:27 crc kubenswrapper[4923]: I1128 11:22:27.023268 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a23f6f20-cf93-4a58-868f-42242f0f1e17-bundle\") pod \"a23f6f20-cf93-4a58-868f-42242f0f1e17\" (UID: \"a23f6f20-cf93-4a58-868f-42242f0f1e17\") " Nov 28 11:22:27 crc kubenswrapper[4923]: I1128 11:22:27.023354 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a23f6f20-cf93-4a58-868f-42242f0f1e17-util\") pod \"a23f6f20-cf93-4a58-868f-42242f0f1e17\" (UID: \"a23f6f20-cf93-4a58-868f-42242f0f1e17\") " Nov 28 11:22:27 crc kubenswrapper[4923]: I1128 11:22:27.023489 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d64nx\" (UniqueName: \"kubernetes.io/projected/a23f6f20-cf93-4a58-868f-42242f0f1e17-kube-api-access-d64nx\") pod \"a23f6f20-cf93-4a58-868f-42242f0f1e17\" (UID: \"a23f6f20-cf93-4a58-868f-42242f0f1e17\") " Nov 28 11:22:27 crc kubenswrapper[4923]: I1128 11:22:27.024677 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a23f6f20-cf93-4a58-868f-42242f0f1e17-bundle" (OuterVolumeSpecName: "bundle") pod "a23f6f20-cf93-4a58-868f-42242f0f1e17" (UID: "a23f6f20-cf93-4a58-868f-42242f0f1e17"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:22:27 crc kubenswrapper[4923]: I1128 11:22:27.029279 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a23f6f20-cf93-4a58-868f-42242f0f1e17-kube-api-access-d64nx" (OuterVolumeSpecName: "kube-api-access-d64nx") pod "a23f6f20-cf93-4a58-868f-42242f0f1e17" (UID: "a23f6f20-cf93-4a58-868f-42242f0f1e17"). InnerVolumeSpecName "kube-api-access-d64nx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:22:27 crc kubenswrapper[4923]: I1128 11:22:27.054922 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a23f6f20-cf93-4a58-868f-42242f0f1e17-util" (OuterVolumeSpecName: "util") pod "a23f6f20-cf93-4a58-868f-42242f0f1e17" (UID: "a23f6f20-cf93-4a58-868f-42242f0f1e17"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:22:27 crc kubenswrapper[4923]: I1128 11:22:27.125520 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d64nx\" (UniqueName: \"kubernetes.io/projected/a23f6f20-cf93-4a58-868f-42242f0f1e17-kube-api-access-d64nx\") on node \"crc\" DevicePath \"\"" Nov 28 11:22:27 crc kubenswrapper[4923]: I1128 11:22:27.125570 4923 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a23f6f20-cf93-4a58-868f-42242f0f1e17-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 11:22:27 crc kubenswrapper[4923]: I1128 11:22:27.125588 4923 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a23f6f20-cf93-4a58-868f-42242f0f1e17-util\") on node \"crc\" DevicePath \"\"" Nov 28 11:22:27 crc kubenswrapper[4923]: I1128 11:22:27.701020 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83cphz8" event={"ID":"a23f6f20-cf93-4a58-868f-42242f0f1e17","Type":"ContainerDied","Data":"8062ffa4949fc9d63933fbd22a359fcb266711b9590d40ba35151debeabf3f7e"} Nov 28 11:22:27 crc kubenswrapper[4923]: I1128 11:22:27.701072 4923 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8062ffa4949fc9d63933fbd22a359fcb266711b9590d40ba35151debeabf3f7e" Nov 28 11:22:27 crc kubenswrapper[4923]: I1128 11:22:27.701135 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83cphz8" Nov 28 11:22:36 crc kubenswrapper[4923]: I1128 11:22:36.485321 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-7fb5d894b8-9ncjh"] Nov 28 11:22:36 crc kubenswrapper[4923]: E1128 11:22:36.485924 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a23f6f20-cf93-4a58-868f-42242f0f1e17" containerName="pull" Nov 28 11:22:36 crc kubenswrapper[4923]: I1128 11:22:36.485947 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="a23f6f20-cf93-4a58-868f-42242f0f1e17" containerName="pull" Nov 28 11:22:36 crc kubenswrapper[4923]: E1128 11:22:36.485963 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a23f6f20-cf93-4a58-868f-42242f0f1e17" containerName="util" Nov 28 11:22:36 crc kubenswrapper[4923]: I1128 11:22:36.485969 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="a23f6f20-cf93-4a58-868f-42242f0f1e17" containerName="util" Nov 28 11:22:36 crc kubenswrapper[4923]: E1128 11:22:36.485975 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aa97fc63-7e09-4217-9fb9-78fca4703f04" containerName="console" Nov 28 11:22:36 crc kubenswrapper[4923]: I1128 11:22:36.485981 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="aa97fc63-7e09-4217-9fb9-78fca4703f04" containerName="console" Nov 28 11:22:36 crc kubenswrapper[4923]: E1128 11:22:36.485990 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a23f6f20-cf93-4a58-868f-42242f0f1e17" containerName="extract" Nov 28 11:22:36 crc kubenswrapper[4923]: I1128 11:22:36.485995 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="a23f6f20-cf93-4a58-868f-42242f0f1e17" containerName="extract" Nov 28 11:22:36 crc kubenswrapper[4923]: I1128 11:22:36.486089 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="a23f6f20-cf93-4a58-868f-42242f0f1e17" containerName="extract" Nov 
28 11:22:36 crc kubenswrapper[4923]: I1128 11:22:36.486106 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="aa97fc63-7e09-4217-9fb9-78fca4703f04" containerName="console" Nov 28 11:22:36 crc kubenswrapper[4923]: I1128 11:22:36.486451 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-7fb5d894b8-9ncjh" Nov 28 11:22:36 crc kubenswrapper[4923]: I1128 11:22:36.488133 4923 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert" Nov 28 11:22:36 crc kubenswrapper[4923]: I1128 11:22:36.489063 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt" Nov 28 11:22:36 crc kubenswrapper[4923]: I1128 11:22:36.489244 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt" Nov 28 11:22:36 crc kubenswrapper[4923]: I1128 11:22:36.489423 4923 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert" Nov 28 11:22:36 crc kubenswrapper[4923]: I1128 11:22:36.494203 4923 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-c9q5l" Nov 28 11:22:36 crc kubenswrapper[4923]: I1128 11:22:36.547865 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-7fb5d894b8-9ncjh"] Nov 28 11:22:36 crc kubenswrapper[4923]: I1128 11:22:36.562010 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dtnjq\" (UniqueName: \"kubernetes.io/projected/870847e6-f37f-4f07-9cd0-a479202baf3d-kube-api-access-dtnjq\") pod \"metallb-operator-controller-manager-7fb5d894b8-9ncjh\" (UID: \"870847e6-f37f-4f07-9cd0-a479202baf3d\") " pod="metallb-system/metallb-operator-controller-manager-7fb5d894b8-9ncjh" Nov 28 11:22:36 crc kubenswrapper[4923]: I1128 11:22:36.562072 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/870847e6-f37f-4f07-9cd0-a479202baf3d-apiservice-cert\") pod \"metallb-operator-controller-manager-7fb5d894b8-9ncjh\" (UID: \"870847e6-f37f-4f07-9cd0-a479202baf3d\") " pod="metallb-system/metallb-operator-controller-manager-7fb5d894b8-9ncjh" Nov 28 11:22:36 crc kubenswrapper[4923]: I1128 11:22:36.562113 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/870847e6-f37f-4f07-9cd0-a479202baf3d-webhook-cert\") pod \"metallb-operator-controller-manager-7fb5d894b8-9ncjh\" (UID: \"870847e6-f37f-4f07-9cd0-a479202baf3d\") " pod="metallb-system/metallb-operator-controller-manager-7fb5d894b8-9ncjh" Nov 28 11:22:36 crc kubenswrapper[4923]: I1128 11:22:36.663279 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/870847e6-f37f-4f07-9cd0-a479202baf3d-webhook-cert\") pod \"metallb-operator-controller-manager-7fb5d894b8-9ncjh\" (UID: \"870847e6-f37f-4f07-9cd0-a479202baf3d\") " pod="metallb-system/metallb-operator-controller-manager-7fb5d894b8-9ncjh" Nov 28 11:22:36 crc kubenswrapper[4923]: I1128 11:22:36.663346 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dtnjq\" (UniqueName: 
\"kubernetes.io/projected/870847e6-f37f-4f07-9cd0-a479202baf3d-kube-api-access-dtnjq\") pod \"metallb-operator-controller-manager-7fb5d894b8-9ncjh\" (UID: \"870847e6-f37f-4f07-9cd0-a479202baf3d\") " pod="metallb-system/metallb-operator-controller-manager-7fb5d894b8-9ncjh" Nov 28 11:22:36 crc kubenswrapper[4923]: I1128 11:22:36.663405 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/870847e6-f37f-4f07-9cd0-a479202baf3d-apiservice-cert\") pod \"metallb-operator-controller-manager-7fb5d894b8-9ncjh\" (UID: \"870847e6-f37f-4f07-9cd0-a479202baf3d\") " pod="metallb-system/metallb-operator-controller-manager-7fb5d894b8-9ncjh" Nov 28 11:22:36 crc kubenswrapper[4923]: I1128 11:22:36.670601 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/870847e6-f37f-4f07-9cd0-a479202baf3d-apiservice-cert\") pod \"metallb-operator-controller-manager-7fb5d894b8-9ncjh\" (UID: \"870847e6-f37f-4f07-9cd0-a479202baf3d\") " pod="metallb-system/metallb-operator-controller-manager-7fb5d894b8-9ncjh" Nov 28 11:22:36 crc kubenswrapper[4923]: I1128 11:22:36.679917 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dtnjq\" (UniqueName: \"kubernetes.io/projected/870847e6-f37f-4f07-9cd0-a479202baf3d-kube-api-access-dtnjq\") pod \"metallb-operator-controller-manager-7fb5d894b8-9ncjh\" (UID: \"870847e6-f37f-4f07-9cd0-a479202baf3d\") " pod="metallb-system/metallb-operator-controller-manager-7fb5d894b8-9ncjh" Nov 28 11:22:36 crc kubenswrapper[4923]: I1128 11:22:36.683571 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/870847e6-f37f-4f07-9cd0-a479202baf3d-webhook-cert\") pod \"metallb-operator-controller-manager-7fb5d894b8-9ncjh\" (UID: \"870847e6-f37f-4f07-9cd0-a479202baf3d\") " pod="metallb-system/metallb-operator-controller-manager-7fb5d894b8-9ncjh" Nov 28 11:22:36 crc kubenswrapper[4923]: I1128 11:22:36.801363 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-7fb5d894b8-9ncjh" Nov 28 11:22:36 crc kubenswrapper[4923]: I1128 11:22:36.914365 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-58c4b9449c-b89p9"] Nov 28 11:22:36 crc kubenswrapper[4923]: I1128 11:22:36.916581 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-58c4b9449c-b89p9" Nov 28 11:22:36 crc kubenswrapper[4923]: I1128 11:22:36.919807 4923 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Nov 28 11:22:36 crc kubenswrapper[4923]: I1128 11:22:36.920015 4923 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert" Nov 28 11:22:36 crc kubenswrapper[4923]: I1128 11:22:36.921182 4923 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-n5lcm" Nov 28 11:22:36 crc kubenswrapper[4923]: I1128 11:22:36.939406 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-58c4b9449c-b89p9"] Nov 28 11:22:37 crc kubenswrapper[4923]: I1128 11:22:37.068819 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/d410dcc0-ae4b-4376-8973-6691ea6b9939-apiservice-cert\") pod \"metallb-operator-webhook-server-58c4b9449c-b89p9\" (UID: \"d410dcc0-ae4b-4376-8973-6691ea6b9939\") " pod="metallb-system/metallb-operator-webhook-server-58c4b9449c-b89p9" Nov 28 11:22:37 crc kubenswrapper[4923]: I1128 11:22:37.068911 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/d410dcc0-ae4b-4376-8973-6691ea6b9939-webhook-cert\") pod \"metallb-operator-webhook-server-58c4b9449c-b89p9\" (UID: \"d410dcc0-ae4b-4376-8973-6691ea6b9939\") " pod="metallb-system/metallb-operator-webhook-server-58c4b9449c-b89p9" Nov 28 11:22:37 crc kubenswrapper[4923]: I1128 11:22:37.068976 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rtp84\" (UniqueName: \"kubernetes.io/projected/d410dcc0-ae4b-4376-8973-6691ea6b9939-kube-api-access-rtp84\") pod \"metallb-operator-webhook-server-58c4b9449c-b89p9\" (UID: \"d410dcc0-ae4b-4376-8973-6691ea6b9939\") " pod="metallb-system/metallb-operator-webhook-server-58c4b9449c-b89p9" Nov 28 11:22:37 crc kubenswrapper[4923]: I1128 11:22:37.169521 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rtp84\" (UniqueName: \"kubernetes.io/projected/d410dcc0-ae4b-4376-8973-6691ea6b9939-kube-api-access-rtp84\") pod \"metallb-operator-webhook-server-58c4b9449c-b89p9\" (UID: \"d410dcc0-ae4b-4376-8973-6691ea6b9939\") " pod="metallb-system/metallb-operator-webhook-server-58c4b9449c-b89p9" Nov 28 11:22:37 crc kubenswrapper[4923]: I1128 11:22:37.169559 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/d410dcc0-ae4b-4376-8973-6691ea6b9939-apiservice-cert\") pod \"metallb-operator-webhook-server-58c4b9449c-b89p9\" (UID: \"d410dcc0-ae4b-4376-8973-6691ea6b9939\") " pod="metallb-system/metallb-operator-webhook-server-58c4b9449c-b89p9" Nov 28 11:22:37 crc kubenswrapper[4923]: I1128 11:22:37.169600 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/d410dcc0-ae4b-4376-8973-6691ea6b9939-webhook-cert\") pod \"metallb-operator-webhook-server-58c4b9449c-b89p9\" (UID: \"d410dcc0-ae4b-4376-8973-6691ea6b9939\") " pod="metallb-system/metallb-operator-webhook-server-58c4b9449c-b89p9" Nov 28 11:22:37 crc kubenswrapper[4923]: I1128 
11:22:37.174637 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/d410dcc0-ae4b-4376-8973-6691ea6b9939-webhook-cert\") pod \"metallb-operator-webhook-server-58c4b9449c-b89p9\" (UID: \"d410dcc0-ae4b-4376-8973-6691ea6b9939\") " pod="metallb-system/metallb-operator-webhook-server-58c4b9449c-b89p9" Nov 28 11:22:37 crc kubenswrapper[4923]: I1128 11:22:37.175224 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/d410dcc0-ae4b-4376-8973-6691ea6b9939-apiservice-cert\") pod \"metallb-operator-webhook-server-58c4b9449c-b89p9\" (UID: \"d410dcc0-ae4b-4376-8973-6691ea6b9939\") " pod="metallb-system/metallb-operator-webhook-server-58c4b9449c-b89p9" Nov 28 11:22:37 crc kubenswrapper[4923]: I1128 11:22:37.193281 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rtp84\" (UniqueName: \"kubernetes.io/projected/d410dcc0-ae4b-4376-8973-6691ea6b9939-kube-api-access-rtp84\") pod \"metallb-operator-webhook-server-58c4b9449c-b89p9\" (UID: \"d410dcc0-ae4b-4376-8973-6691ea6b9939\") " pod="metallb-system/metallb-operator-webhook-server-58c4b9449c-b89p9" Nov 28 11:22:37 crc kubenswrapper[4923]: I1128 11:22:37.251880 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-7fb5d894b8-9ncjh"] Nov 28 11:22:37 crc kubenswrapper[4923]: I1128 11:22:37.252261 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-58c4b9449c-b89p9" Nov 28 11:22:37 crc kubenswrapper[4923]: I1128 11:22:37.679074 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-58c4b9449c-b89p9"] Nov 28 11:22:37 crc kubenswrapper[4923]: W1128 11:22:37.685090 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd410dcc0_ae4b_4376_8973_6691ea6b9939.slice/crio-f196fe7600086b2571681b7356a65fd4c2df2f588c5daf2b49ee661997a3bd23 WatchSource:0}: Error finding container f196fe7600086b2571681b7356a65fd4c2df2f588c5daf2b49ee661997a3bd23: Status 404 returned error can't find the container with id f196fe7600086b2571681b7356a65fd4c2df2f588c5daf2b49ee661997a3bd23 Nov 28 11:22:37 crc kubenswrapper[4923]: I1128 11:22:37.760587 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-7fb5d894b8-9ncjh" event={"ID":"870847e6-f37f-4f07-9cd0-a479202baf3d","Type":"ContainerStarted","Data":"a4f9df1088968c9b20e01851b2fdf096bc1858de621e2e3684bfbb7ed4c939c0"} Nov 28 11:22:37 crc kubenswrapper[4923]: I1128 11:22:37.762233 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-58c4b9449c-b89p9" event={"ID":"d410dcc0-ae4b-4376-8973-6691ea6b9939","Type":"ContainerStarted","Data":"f196fe7600086b2571681b7356a65fd4c2df2f588c5daf2b49ee661997a3bd23"} Nov 28 11:22:42 crc kubenswrapper[4923]: I1128 11:22:42.797866 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-7fb5d894b8-9ncjh" event={"ID":"870847e6-f37f-4f07-9cd0-a479202baf3d","Type":"ContainerStarted","Data":"235b5b4bbbec6008e7da9eff22a66e54c798cc8f48dadec63f117650dae8a9f0"} Nov 28 11:22:42 crc kubenswrapper[4923]: I1128 11:22:42.798998 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="metallb-system/metallb-operator-controller-manager-7fb5d894b8-9ncjh" Nov 28 11:22:42 crc kubenswrapper[4923]: I1128 11:22:42.800434 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-58c4b9449c-b89p9" event={"ID":"d410dcc0-ae4b-4376-8973-6691ea6b9939","Type":"ContainerStarted","Data":"07513bec03050d430e4baba71e8c14437b6edc70cad8cb04a1173906c9ecc11b"} Nov 28 11:22:42 crc kubenswrapper[4923]: I1128 11:22:42.800526 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-58c4b9449c-b89p9" Nov 28 11:22:42 crc kubenswrapper[4923]: I1128 11:22:42.817801 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-controller-manager-7fb5d894b8-9ncjh" podStartSLOduration=1.56617251 podStartE2EDuration="6.817786816s" podCreationTimestamp="2025-11-28 11:22:36 +0000 UTC" firstStartedPulling="2025-11-28 11:22:37.275869328 +0000 UTC m=+836.404553538" lastFinishedPulling="2025-11-28 11:22:42.527483634 +0000 UTC m=+841.656167844" observedRunningTime="2025-11-28 11:22:42.814921734 +0000 UTC m=+841.943605944" watchObservedRunningTime="2025-11-28 11:22:42.817786816 +0000 UTC m=+841.946471026" Nov 28 11:22:42 crc kubenswrapper[4923]: I1128 11:22:42.832488 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-58c4b9449c-b89p9" podStartSLOduration=1.987489327 podStartE2EDuration="6.83247379s" podCreationTimestamp="2025-11-28 11:22:36 +0000 UTC" firstStartedPulling="2025-11-28 11:22:37.687641576 +0000 UTC m=+836.816325786" lastFinishedPulling="2025-11-28 11:22:42.532626039 +0000 UTC m=+841.661310249" observedRunningTime="2025-11-28 11:22:42.831374959 +0000 UTC m=+841.960059169" watchObservedRunningTime="2025-11-28 11:22:42.83247379 +0000 UTC m=+841.961158000" Nov 28 11:22:57 crc kubenswrapper[4923]: I1128 11:22:57.258165 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-58c4b9449c-b89p9" Nov 28 11:23:16 crc kubenswrapper[4923]: I1128 11:23:16.805180 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-7fb5d894b8-9ncjh" Nov 28 11:23:17 crc kubenswrapper[4923]: I1128 11:23:17.726635 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-ld8vz"] Nov 28 11:23:17 crc kubenswrapper[4923]: I1128 11:23:17.729669 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-ld8vz" Nov 28 11:23:17 crc kubenswrapper[4923]: I1128 11:23:17.731997 4923 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret" Nov 28 11:23:17 crc kubenswrapper[4923]: I1128 11:23:17.732024 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup" Nov 28 11:23:17 crc kubenswrapper[4923]: I1128 11:23:17.732645 4923 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-vjhw5" Nov 28 11:23:17 crc kubenswrapper[4923]: I1128 11:23:17.748419 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-webhook-server-7fcb986d4-kj8cf"] Nov 28 11:23:17 crc kubenswrapper[4923]: I1128 11:23:17.749047 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-kj8cf" Nov 28 11:23:17 crc kubenswrapper[4923]: I1128 11:23:17.752286 4923 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert" Nov 28 11:23:17 crc kubenswrapper[4923]: I1128 11:23:17.761795 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7fcb986d4-kj8cf"] Nov 28 11:23:17 crc kubenswrapper[4923]: I1128 11:23:17.816713 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8ptnb\" (UniqueName: \"kubernetes.io/projected/83a5f653-71d2-4df3-bc29-b7bbbcf13765-kube-api-access-8ptnb\") pod \"frr-k8s-webhook-server-7fcb986d4-kj8cf\" (UID: \"83a5f653-71d2-4df3-bc29-b7bbbcf13765\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-kj8cf" Nov 28 11:23:17 crc kubenswrapper[4923]: I1128 11:23:17.816756 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/fa72f0b5-6e71-4591-b569-5137c1176193-metrics-certs\") pod \"frr-k8s-ld8vz\" (UID: \"fa72f0b5-6e71-4591-b569-5137c1176193\") " pod="metallb-system/frr-k8s-ld8vz" Nov 28 11:23:17 crc kubenswrapper[4923]: I1128 11:23:17.816781 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/fa72f0b5-6e71-4591-b569-5137c1176193-frr-startup\") pod \"frr-k8s-ld8vz\" (UID: \"fa72f0b5-6e71-4591-b569-5137c1176193\") " pod="metallb-system/frr-k8s-ld8vz" Nov 28 11:23:17 crc kubenswrapper[4923]: I1128 11:23:17.816812 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/fa72f0b5-6e71-4591-b569-5137c1176193-frr-sockets\") pod \"frr-k8s-ld8vz\" (UID: \"fa72f0b5-6e71-4591-b569-5137c1176193\") " pod="metallb-system/frr-k8s-ld8vz" Nov 28 11:23:17 crc kubenswrapper[4923]: I1128 11:23:17.816828 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/fa72f0b5-6e71-4591-b569-5137c1176193-frr-conf\") pod \"frr-k8s-ld8vz\" (UID: \"fa72f0b5-6e71-4591-b569-5137c1176193\") " pod="metallb-system/frr-k8s-ld8vz" Nov 28 11:23:17 crc kubenswrapper[4923]: I1128 11:23:17.816849 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/fa72f0b5-6e71-4591-b569-5137c1176193-metrics\") pod \"frr-k8s-ld8vz\" (UID: \"fa72f0b5-6e71-4591-b569-5137c1176193\") " pod="metallb-system/frr-k8s-ld8vz" Nov 28 11:23:17 crc kubenswrapper[4923]: I1128 11:23:17.816865 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dfgph\" (UniqueName: \"kubernetes.io/projected/fa72f0b5-6e71-4591-b569-5137c1176193-kube-api-access-dfgph\") pod \"frr-k8s-ld8vz\" (UID: \"fa72f0b5-6e71-4591-b569-5137c1176193\") " pod="metallb-system/frr-k8s-ld8vz" Nov 28 11:23:17 crc kubenswrapper[4923]: I1128 11:23:17.816882 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/83a5f653-71d2-4df3-bc29-b7bbbcf13765-cert\") pod \"frr-k8s-webhook-server-7fcb986d4-kj8cf\" (UID: \"83a5f653-71d2-4df3-bc29-b7bbbcf13765\") " 
pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-kj8cf" Nov 28 11:23:17 crc kubenswrapper[4923]: I1128 11:23:17.816907 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/fa72f0b5-6e71-4591-b569-5137c1176193-reloader\") pod \"frr-k8s-ld8vz\" (UID: \"fa72f0b5-6e71-4591-b569-5137c1176193\") " pod="metallb-system/frr-k8s-ld8vz" Nov 28 11:23:17 crc kubenswrapper[4923]: I1128 11:23:17.864235 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-tlcl5"] Nov 28 11:23:17 crc kubenswrapper[4923]: I1128 11:23:17.865428 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-tlcl5" Nov 28 11:23:17 crc kubenswrapper[4923]: I1128 11:23:17.867808 4923 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret" Nov 28 11:23:17 crc kubenswrapper[4923]: I1128 11:23:17.868236 4923 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist" Nov 28 11:23:17 crc kubenswrapper[4923]: I1128 11:23:17.868984 4923 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-k9vbl" Nov 28 11:23:17 crc kubenswrapper[4923]: I1128 11:23:17.874422 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2" Nov 28 11:23:17 crc kubenswrapper[4923]: I1128 11:23:17.901278 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-f8648f98b-42ftq"] Nov 28 11:23:17 crc kubenswrapper[4923]: I1128 11:23:17.902060 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-f8648f98b-42ftq" Nov 28 11:23:17 crc kubenswrapper[4923]: I1128 11:23:17.904880 4923 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret" Nov 28 11:23:17 crc kubenswrapper[4923]: I1128 11:23:17.918067 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8ptnb\" (UniqueName: \"kubernetes.io/projected/83a5f653-71d2-4df3-bc29-b7bbbcf13765-kube-api-access-8ptnb\") pod \"frr-k8s-webhook-server-7fcb986d4-kj8cf\" (UID: \"83a5f653-71d2-4df3-bc29-b7bbbcf13765\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-kj8cf" Nov 28 11:23:17 crc kubenswrapper[4923]: I1128 11:23:17.918111 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/fa72f0b5-6e71-4591-b569-5137c1176193-metrics-certs\") pod \"frr-k8s-ld8vz\" (UID: \"fa72f0b5-6e71-4591-b569-5137c1176193\") " pod="metallb-system/frr-k8s-ld8vz" Nov 28 11:23:17 crc kubenswrapper[4923]: I1128 11:23:17.918135 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/fa72f0b5-6e71-4591-b569-5137c1176193-frr-startup\") pod \"frr-k8s-ld8vz\" (UID: \"fa72f0b5-6e71-4591-b569-5137c1176193\") " pod="metallb-system/frr-k8s-ld8vz" Nov 28 11:23:17 crc kubenswrapper[4923]: I1128 11:23:17.918159 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/f5feb4d3-7f22-4324-a745-2dcd5ec72db9-memberlist\") pod \"speaker-tlcl5\" (UID: \"f5feb4d3-7f22-4324-a745-2dcd5ec72db9\") " pod="metallb-system/speaker-tlcl5" Nov 28 11:23:17 crc kubenswrapper[4923]: I1128 11:23:17.918182 4923 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-stqzt\" (UniqueName: \"kubernetes.io/projected/f5feb4d3-7f22-4324-a745-2dcd5ec72db9-kube-api-access-stqzt\") pod \"speaker-tlcl5\" (UID: \"f5feb4d3-7f22-4324-a745-2dcd5ec72db9\") " pod="metallb-system/speaker-tlcl5" Nov 28 11:23:17 crc kubenswrapper[4923]: I1128 11:23:17.918200 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/fa72f0b5-6e71-4591-b569-5137c1176193-frr-sockets\") pod \"frr-k8s-ld8vz\" (UID: \"fa72f0b5-6e71-4591-b569-5137c1176193\") " pod="metallb-system/frr-k8s-ld8vz" Nov 28 11:23:17 crc kubenswrapper[4923]: I1128 11:23:17.918217 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/fa72f0b5-6e71-4591-b569-5137c1176193-frr-conf\") pod \"frr-k8s-ld8vz\" (UID: \"fa72f0b5-6e71-4591-b569-5137c1176193\") " pod="metallb-system/frr-k8s-ld8vz" Nov 28 11:23:17 crc kubenswrapper[4923]: I1128 11:23:17.918235 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/fa72f0b5-6e71-4591-b569-5137c1176193-metrics\") pod \"frr-k8s-ld8vz\" (UID: \"fa72f0b5-6e71-4591-b569-5137c1176193\") " pod="metallb-system/frr-k8s-ld8vz" Nov 28 11:23:17 crc kubenswrapper[4923]: I1128 11:23:17.918251 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/f5feb4d3-7f22-4324-a745-2dcd5ec72db9-metallb-excludel2\") pod \"speaker-tlcl5\" (UID: \"f5feb4d3-7f22-4324-a745-2dcd5ec72db9\") " pod="metallb-system/speaker-tlcl5" Nov 28 11:23:17 crc kubenswrapper[4923]: I1128 11:23:17.918268 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dfgph\" (UniqueName: \"kubernetes.io/projected/fa72f0b5-6e71-4591-b569-5137c1176193-kube-api-access-dfgph\") pod \"frr-k8s-ld8vz\" (UID: \"fa72f0b5-6e71-4591-b569-5137c1176193\") " pod="metallb-system/frr-k8s-ld8vz" Nov 28 11:23:17 crc kubenswrapper[4923]: I1128 11:23:17.918287 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/83a5f653-71d2-4df3-bc29-b7bbbcf13765-cert\") pod \"frr-k8s-webhook-server-7fcb986d4-kj8cf\" (UID: \"83a5f653-71d2-4df3-bc29-b7bbbcf13765\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-kj8cf" Nov 28 11:23:17 crc kubenswrapper[4923]: I1128 11:23:17.918304 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/fa72f0b5-6e71-4591-b569-5137c1176193-reloader\") pod \"frr-k8s-ld8vz\" (UID: \"fa72f0b5-6e71-4591-b569-5137c1176193\") " pod="metallb-system/frr-k8s-ld8vz" Nov 28 11:23:17 crc kubenswrapper[4923]: I1128 11:23:17.918321 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f5feb4d3-7f22-4324-a745-2dcd5ec72db9-metrics-certs\") pod \"speaker-tlcl5\" (UID: \"f5feb4d3-7f22-4324-a745-2dcd5ec72db9\") " pod="metallb-system/speaker-tlcl5" Nov 28 11:23:17 crc kubenswrapper[4923]: E1128 11:23:17.918627 4923 secret.go:188] Couldn't get secret metallb-system/frr-k8s-certs-secret: secret "frr-k8s-certs-secret" not found Nov 28 11:23:17 crc kubenswrapper[4923]: E1128 11:23:17.918667 4923 
nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fa72f0b5-6e71-4591-b569-5137c1176193-metrics-certs podName:fa72f0b5-6e71-4591-b569-5137c1176193 nodeName:}" failed. No retries permitted until 2025-11-28 11:23:18.418652902 +0000 UTC m=+877.547337112 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/fa72f0b5-6e71-4591-b569-5137c1176193-metrics-certs") pod "frr-k8s-ld8vz" (UID: "fa72f0b5-6e71-4591-b569-5137c1176193") : secret "frr-k8s-certs-secret" not found Nov 28 11:23:17 crc kubenswrapper[4923]: I1128 11:23:17.919527 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/fa72f0b5-6e71-4591-b569-5137c1176193-frr-startup\") pod \"frr-k8s-ld8vz\" (UID: \"fa72f0b5-6e71-4591-b569-5137c1176193\") " pod="metallb-system/frr-k8s-ld8vz" Nov 28 11:23:17 crc kubenswrapper[4923]: I1128 11:23:17.919754 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/fa72f0b5-6e71-4591-b569-5137c1176193-frr-sockets\") pod \"frr-k8s-ld8vz\" (UID: \"fa72f0b5-6e71-4591-b569-5137c1176193\") " pod="metallb-system/frr-k8s-ld8vz" Nov 28 11:23:17 crc kubenswrapper[4923]: I1128 11:23:17.919959 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/fa72f0b5-6e71-4591-b569-5137c1176193-frr-conf\") pod \"frr-k8s-ld8vz\" (UID: \"fa72f0b5-6e71-4591-b569-5137c1176193\") " pod="metallb-system/frr-k8s-ld8vz" Nov 28 11:23:17 crc kubenswrapper[4923]: I1128 11:23:17.920133 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/fa72f0b5-6e71-4591-b569-5137c1176193-metrics\") pod \"frr-k8s-ld8vz\" (UID: \"fa72f0b5-6e71-4591-b569-5137c1176193\") " pod="metallb-system/frr-k8s-ld8vz" Nov 28 11:23:17 crc kubenswrapper[4923]: I1128 11:23:17.921110 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/fa72f0b5-6e71-4591-b569-5137c1176193-reloader\") pod \"frr-k8s-ld8vz\" (UID: \"fa72f0b5-6e71-4591-b569-5137c1176193\") " pod="metallb-system/frr-k8s-ld8vz" Nov 28 11:23:17 crc kubenswrapper[4923]: I1128 11:23:17.929679 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/83a5f653-71d2-4df3-bc29-b7bbbcf13765-cert\") pod \"frr-k8s-webhook-server-7fcb986d4-kj8cf\" (UID: \"83a5f653-71d2-4df3-bc29-b7bbbcf13765\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-kj8cf" Nov 28 11:23:17 crc kubenswrapper[4923]: I1128 11:23:17.931654 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-f8648f98b-42ftq"] Nov 28 11:23:17 crc kubenswrapper[4923]: I1128 11:23:17.952530 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8ptnb\" (UniqueName: \"kubernetes.io/projected/83a5f653-71d2-4df3-bc29-b7bbbcf13765-kube-api-access-8ptnb\") pod \"frr-k8s-webhook-server-7fcb986d4-kj8cf\" (UID: \"83a5f653-71d2-4df3-bc29-b7bbbcf13765\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-kj8cf" Nov 28 11:23:17 crc kubenswrapper[4923]: I1128 11:23:17.955347 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dfgph\" (UniqueName: \"kubernetes.io/projected/fa72f0b5-6e71-4591-b569-5137c1176193-kube-api-access-dfgph\") pod \"frr-k8s-ld8vz\" (UID: 
\"fa72f0b5-6e71-4591-b569-5137c1176193\") " pod="metallb-system/frr-k8s-ld8vz" Nov 28 11:23:18 crc kubenswrapper[4923]: I1128 11:23:18.019273 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9cc1acfe-a432-40e8-859a-a026519e0b19-metrics-certs\") pod \"controller-f8648f98b-42ftq\" (UID: \"9cc1acfe-a432-40e8-859a-a026519e0b19\") " pod="metallb-system/controller-f8648f98b-42ftq" Nov 28 11:23:18 crc kubenswrapper[4923]: I1128 11:23:18.019596 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/f5feb4d3-7f22-4324-a745-2dcd5ec72db9-memberlist\") pod \"speaker-tlcl5\" (UID: \"f5feb4d3-7f22-4324-a745-2dcd5ec72db9\") " pod="metallb-system/speaker-tlcl5" Nov 28 11:23:18 crc kubenswrapper[4923]: I1128 11:23:18.019620 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-stqzt\" (UniqueName: \"kubernetes.io/projected/f5feb4d3-7f22-4324-a745-2dcd5ec72db9-kube-api-access-stqzt\") pod \"speaker-tlcl5\" (UID: \"f5feb4d3-7f22-4324-a745-2dcd5ec72db9\") " pod="metallb-system/speaker-tlcl5" Nov 28 11:23:18 crc kubenswrapper[4923]: I1128 11:23:18.019651 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/f5feb4d3-7f22-4324-a745-2dcd5ec72db9-metallb-excludel2\") pod \"speaker-tlcl5\" (UID: \"f5feb4d3-7f22-4324-a745-2dcd5ec72db9\") " pod="metallb-system/speaker-tlcl5" Nov 28 11:23:18 crc kubenswrapper[4923]: I1128 11:23:18.019667 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xwr2m\" (UniqueName: \"kubernetes.io/projected/9cc1acfe-a432-40e8-859a-a026519e0b19-kube-api-access-xwr2m\") pod \"controller-f8648f98b-42ftq\" (UID: \"9cc1acfe-a432-40e8-859a-a026519e0b19\") " pod="metallb-system/controller-f8648f98b-42ftq" Nov 28 11:23:18 crc kubenswrapper[4923]: I1128 11:23:18.019699 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f5feb4d3-7f22-4324-a745-2dcd5ec72db9-metrics-certs\") pod \"speaker-tlcl5\" (UID: \"f5feb4d3-7f22-4324-a745-2dcd5ec72db9\") " pod="metallb-system/speaker-tlcl5" Nov 28 11:23:18 crc kubenswrapper[4923]: I1128 11:23:18.019731 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/9cc1acfe-a432-40e8-859a-a026519e0b19-cert\") pod \"controller-f8648f98b-42ftq\" (UID: \"9cc1acfe-a432-40e8-859a-a026519e0b19\") " pod="metallb-system/controller-f8648f98b-42ftq" Nov 28 11:23:18 crc kubenswrapper[4923]: E1128 11:23:18.019741 4923 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Nov 28 11:23:18 crc kubenswrapper[4923]: E1128 11:23:18.019790 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f5feb4d3-7f22-4324-a745-2dcd5ec72db9-memberlist podName:f5feb4d3-7f22-4324-a745-2dcd5ec72db9 nodeName:}" failed. No retries permitted until 2025-11-28 11:23:18.519775808 +0000 UTC m=+877.648460018 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/f5feb4d3-7f22-4324-a745-2dcd5ec72db9-memberlist") pod "speaker-tlcl5" (UID: "f5feb4d3-7f22-4324-a745-2dcd5ec72db9") : secret "metallb-memberlist" not found Nov 28 11:23:18 crc kubenswrapper[4923]: E1128 11:23:18.019851 4923 secret.go:188] Couldn't get secret metallb-system/speaker-certs-secret: secret "speaker-certs-secret" not found Nov 28 11:23:18 crc kubenswrapper[4923]: E1128 11:23:18.019878 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f5feb4d3-7f22-4324-a745-2dcd5ec72db9-metrics-certs podName:f5feb4d3-7f22-4324-a745-2dcd5ec72db9 nodeName:}" failed. No retries permitted until 2025-11-28 11:23:18.519869521 +0000 UTC m=+877.648553721 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/f5feb4d3-7f22-4324-a745-2dcd5ec72db9-metrics-certs") pod "speaker-tlcl5" (UID: "f5feb4d3-7f22-4324-a745-2dcd5ec72db9") : secret "speaker-certs-secret" not found Nov 28 11:23:18 crc kubenswrapper[4923]: I1128 11:23:18.020605 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/f5feb4d3-7f22-4324-a745-2dcd5ec72db9-metallb-excludel2\") pod \"speaker-tlcl5\" (UID: \"f5feb4d3-7f22-4324-a745-2dcd5ec72db9\") " pod="metallb-system/speaker-tlcl5" Nov 28 11:23:18 crc kubenswrapper[4923]: I1128 11:23:18.036630 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-stqzt\" (UniqueName: \"kubernetes.io/projected/f5feb4d3-7f22-4324-a745-2dcd5ec72db9-kube-api-access-stqzt\") pod \"speaker-tlcl5\" (UID: \"f5feb4d3-7f22-4324-a745-2dcd5ec72db9\") " pod="metallb-system/speaker-tlcl5" Nov 28 11:23:18 crc kubenswrapper[4923]: I1128 11:23:18.061767 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-kj8cf" Nov 28 11:23:18 crc kubenswrapper[4923]: I1128 11:23:18.120806 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xwr2m\" (UniqueName: \"kubernetes.io/projected/9cc1acfe-a432-40e8-859a-a026519e0b19-kube-api-access-xwr2m\") pod \"controller-f8648f98b-42ftq\" (UID: \"9cc1acfe-a432-40e8-859a-a026519e0b19\") " pod="metallb-system/controller-f8648f98b-42ftq" Nov 28 11:23:18 crc kubenswrapper[4923]: I1128 11:23:18.120914 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/9cc1acfe-a432-40e8-859a-a026519e0b19-cert\") pod \"controller-f8648f98b-42ftq\" (UID: \"9cc1acfe-a432-40e8-859a-a026519e0b19\") " pod="metallb-system/controller-f8648f98b-42ftq" Nov 28 11:23:18 crc kubenswrapper[4923]: I1128 11:23:18.120989 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9cc1acfe-a432-40e8-859a-a026519e0b19-metrics-certs\") pod \"controller-f8648f98b-42ftq\" (UID: \"9cc1acfe-a432-40e8-859a-a026519e0b19\") " pod="metallb-system/controller-f8648f98b-42ftq" Nov 28 11:23:18 crc kubenswrapper[4923]: E1128 11:23:18.121132 4923 secret.go:188] Couldn't get secret metallb-system/controller-certs-secret: secret "controller-certs-secret" not found Nov 28 11:23:18 crc kubenswrapper[4923]: E1128 11:23:18.121205 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9cc1acfe-a432-40e8-859a-a026519e0b19-metrics-certs podName:9cc1acfe-a432-40e8-859a-a026519e0b19 nodeName:}" failed. No retries permitted until 2025-11-28 11:23:18.621189792 +0000 UTC m=+877.749874002 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/9cc1acfe-a432-40e8-859a-a026519e0b19-metrics-certs") pod "controller-f8648f98b-42ftq" (UID: "9cc1acfe-a432-40e8-859a-a026519e0b19") : secret "controller-certs-secret" not found Nov 28 11:23:18 crc kubenswrapper[4923]: I1128 11:23:18.124406 4923 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Nov 28 11:23:18 crc kubenswrapper[4923]: I1128 11:23:18.136795 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/9cc1acfe-a432-40e8-859a-a026519e0b19-cert\") pod \"controller-f8648f98b-42ftq\" (UID: \"9cc1acfe-a432-40e8-859a-a026519e0b19\") " pod="metallb-system/controller-f8648f98b-42ftq" Nov 28 11:23:18 crc kubenswrapper[4923]: I1128 11:23:18.149433 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xwr2m\" (UniqueName: \"kubernetes.io/projected/9cc1acfe-a432-40e8-859a-a026519e0b19-kube-api-access-xwr2m\") pod \"controller-f8648f98b-42ftq\" (UID: \"9cc1acfe-a432-40e8-859a-a026519e0b19\") " pod="metallb-system/controller-f8648f98b-42ftq" Nov 28 11:23:18 crc kubenswrapper[4923]: I1128 11:23:18.426237 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/fa72f0b5-6e71-4591-b569-5137c1176193-metrics-certs\") pod \"frr-k8s-ld8vz\" (UID: \"fa72f0b5-6e71-4591-b569-5137c1176193\") " pod="metallb-system/frr-k8s-ld8vz" Nov 28 11:23:18 crc kubenswrapper[4923]: I1128 11:23:18.431020 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/fa72f0b5-6e71-4591-b569-5137c1176193-metrics-certs\") pod \"frr-k8s-ld8vz\" (UID: \"fa72f0b5-6e71-4591-b569-5137c1176193\") " pod="metallb-system/frr-k8s-ld8vz" Nov 28 11:23:18 crc kubenswrapper[4923]: I1128 11:23:18.528010 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/f5feb4d3-7f22-4324-a745-2dcd5ec72db9-memberlist\") pod \"speaker-tlcl5\" (UID: \"f5feb4d3-7f22-4324-a745-2dcd5ec72db9\") " pod="metallb-system/speaker-tlcl5" Nov 28 11:23:18 crc kubenswrapper[4923]: I1128 11:23:18.528085 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f5feb4d3-7f22-4324-a745-2dcd5ec72db9-metrics-certs\") pod \"speaker-tlcl5\" (UID: \"f5feb4d3-7f22-4324-a745-2dcd5ec72db9\") " pod="metallb-system/speaker-tlcl5" Nov 28 11:23:18 crc kubenswrapper[4923]: E1128 11:23:18.528304 4923 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Nov 28 11:23:18 crc kubenswrapper[4923]: E1128 11:23:18.528445 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f5feb4d3-7f22-4324-a745-2dcd5ec72db9-memberlist podName:f5feb4d3-7f22-4324-a745-2dcd5ec72db9 nodeName:}" failed. No retries permitted until 2025-11-28 11:23:19.528417143 +0000 UTC m=+878.657101383 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/f5feb4d3-7f22-4324-a745-2dcd5ec72db9-memberlist") pod "speaker-tlcl5" (UID: "f5feb4d3-7f22-4324-a745-2dcd5ec72db9") : secret "metallb-memberlist" not found Nov 28 11:23:18 crc kubenswrapper[4923]: I1128 11:23:18.530649 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f5feb4d3-7f22-4324-a745-2dcd5ec72db9-metrics-certs\") pod \"speaker-tlcl5\" (UID: \"f5feb4d3-7f22-4324-a745-2dcd5ec72db9\") " pod="metallb-system/speaker-tlcl5" Nov 28 11:23:18 crc kubenswrapper[4923]: I1128 11:23:18.541606 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7fcb986d4-kj8cf"] Nov 28 11:23:18 crc kubenswrapper[4923]: I1128 11:23:18.629017 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9cc1acfe-a432-40e8-859a-a026519e0b19-metrics-certs\") pod \"controller-f8648f98b-42ftq\" (UID: \"9cc1acfe-a432-40e8-859a-a026519e0b19\") " pod="metallb-system/controller-f8648f98b-42ftq" Nov 28 11:23:18 crc kubenswrapper[4923]: I1128 11:23:18.632593 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9cc1acfe-a432-40e8-859a-a026519e0b19-metrics-certs\") pod \"controller-f8648f98b-42ftq\" (UID: \"9cc1acfe-a432-40e8-859a-a026519e0b19\") " pod="metallb-system/controller-f8648f98b-42ftq" Nov 28 11:23:18 crc kubenswrapper[4923]: I1128 11:23:18.647204 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-ld8vz" Nov 28 11:23:18 crc kubenswrapper[4923]: I1128 11:23:18.813339 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/controller-f8648f98b-42ftq" Nov 28 11:23:19 crc kubenswrapper[4923]: I1128 11:23:19.041905 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-f8648f98b-42ftq"] Nov 28 11:23:19 crc kubenswrapper[4923]: I1128 11:23:19.087000 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-ld8vz" event={"ID":"fa72f0b5-6e71-4591-b569-5137c1176193","Type":"ContainerStarted","Data":"bd4b48bcce3b2630f2f05c9b40c4a2a37602b7ebbad2fd087d2c6dc6eb3cb5af"} Nov 28 11:23:19 crc kubenswrapper[4923]: I1128 11:23:19.087740 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-kj8cf" event={"ID":"83a5f653-71d2-4df3-bc29-b7bbbcf13765","Type":"ContainerStarted","Data":"e281869af7e9548cbfd1ceba04af9376ab9b7957ec4deaa3a17d816e15a4d521"} Nov 28 11:23:19 crc kubenswrapper[4923]: I1128 11:23:19.088422 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-f8648f98b-42ftq" event={"ID":"9cc1acfe-a432-40e8-859a-a026519e0b19","Type":"ContainerStarted","Data":"75ba9e5f9315f0a26b8e679bc225733acdf672b2367c196592fcbbcf92488ab0"} Nov 28 11:23:19 crc kubenswrapper[4923]: I1128 11:23:19.542126 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/f5feb4d3-7f22-4324-a745-2dcd5ec72db9-memberlist\") pod \"speaker-tlcl5\" (UID: \"f5feb4d3-7f22-4324-a745-2dcd5ec72db9\") " pod="metallb-system/speaker-tlcl5" Nov 28 11:23:19 crc kubenswrapper[4923]: I1128 11:23:19.547541 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/f5feb4d3-7f22-4324-a745-2dcd5ec72db9-memberlist\") pod \"speaker-tlcl5\" (UID: \"f5feb4d3-7f22-4324-a745-2dcd5ec72db9\") " pod="metallb-system/speaker-tlcl5" Nov 28 11:23:19 crc kubenswrapper[4923]: I1128 11:23:19.677043 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/speaker-tlcl5" Nov 28 11:23:19 crc kubenswrapper[4923]: W1128 11:23:19.706422 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf5feb4d3_7f22_4324_a745_2dcd5ec72db9.slice/crio-5ed1c9224cde2325bc03158e73a3afc2fd72b79e753b3a55e9cbf30e04441968 WatchSource:0}: Error finding container 5ed1c9224cde2325bc03158e73a3afc2fd72b79e753b3a55e9cbf30e04441968: Status 404 returned error can't find the container with id 5ed1c9224cde2325bc03158e73a3afc2fd72b79e753b3a55e9cbf30e04441968 Nov 28 11:23:20 crc kubenswrapper[4923]: I1128 11:23:20.105245 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-f8648f98b-42ftq" event={"ID":"9cc1acfe-a432-40e8-859a-a026519e0b19","Type":"ContainerStarted","Data":"a3270e05cf5dc9dfe0f86f597c42d044b1c1c8b98c601a3eaec64dc736cab04f"} Nov 28 11:23:20 crc kubenswrapper[4923]: I1128 11:23:20.105283 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-f8648f98b-42ftq" event={"ID":"9cc1acfe-a432-40e8-859a-a026519e0b19","Type":"ContainerStarted","Data":"d168b72429fc7ad64a2658135e2ef5342166082954dec4c36b0404ee78f4735f"} Nov 28 11:23:20 crc kubenswrapper[4923]: I1128 11:23:20.106070 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-f8648f98b-42ftq" Nov 28 11:23:20 crc kubenswrapper[4923]: I1128 11:23:20.142966 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-tlcl5" event={"ID":"f5feb4d3-7f22-4324-a745-2dcd5ec72db9","Type":"ContainerStarted","Data":"1b75f31aea2f03577036394e336464da6f1c94ccc300492b905812251e54f0ce"} Nov 28 11:23:20 crc kubenswrapper[4923]: I1128 11:23:20.143004 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-tlcl5" event={"ID":"f5feb4d3-7f22-4324-a745-2dcd5ec72db9","Type":"ContainerStarted","Data":"5ed1c9224cde2325bc03158e73a3afc2fd72b79e753b3a55e9cbf30e04441968"} Nov 28 11:23:20 crc kubenswrapper[4923]: I1128 11:23:20.168404 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/controller-f8648f98b-42ftq" podStartSLOduration=3.168380374 podStartE2EDuration="3.168380374s" podCreationTimestamp="2025-11-28 11:23:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:23:20.162323544 +0000 UTC m=+879.291007754" watchObservedRunningTime="2025-11-28 11:23:20.168380374 +0000 UTC m=+879.297064584" Nov 28 11:23:21 crc kubenswrapper[4923]: I1128 11:23:21.150717 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-tlcl5" event={"ID":"f5feb4d3-7f22-4324-a745-2dcd5ec72db9","Type":"ContainerStarted","Data":"dc3bfac9ea8ef791ef9a0dc87d3b4f66de33a341141ffa548a0b76711276e017"} Nov 28 11:23:21 crc kubenswrapper[4923]: I1128 11:23:21.150960 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-tlcl5" Nov 28 11:23:21 crc kubenswrapper[4923]: I1128 11:23:21.177458 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/speaker-tlcl5" podStartSLOduration=4.177434771 podStartE2EDuration="4.177434771s" podCreationTimestamp="2025-11-28 11:23:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:23:21.170111165 +0000 UTC m=+880.298795375" 
watchObservedRunningTime="2025-11-28 11:23:21.177434771 +0000 UTC m=+880.306118981" Nov 28 11:23:27 crc kubenswrapper[4923]: I1128 11:23:27.195154 4923 generic.go:334] "Generic (PLEG): container finished" podID="fa72f0b5-6e71-4591-b569-5137c1176193" containerID="7fddcc3fcda7854a790db0bb2eb3a0fe0e275192aa72cefb624473187bba5b19" exitCode=0 Nov 28 11:23:27 crc kubenswrapper[4923]: I1128 11:23:27.195595 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-ld8vz" event={"ID":"fa72f0b5-6e71-4591-b569-5137c1176193","Type":"ContainerDied","Data":"7fddcc3fcda7854a790db0bb2eb3a0fe0e275192aa72cefb624473187bba5b19"} Nov 28 11:23:27 crc kubenswrapper[4923]: I1128 11:23:27.198527 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-kj8cf" event={"ID":"83a5f653-71d2-4df3-bc29-b7bbbcf13765","Type":"ContainerStarted","Data":"848aeb5dd33b203ae059bc67a12a9640770e8b9d1d4bd979471df0d3db756a2f"} Nov 28 11:23:27 crc kubenswrapper[4923]: I1128 11:23:27.198743 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-kj8cf" Nov 28 11:23:27 crc kubenswrapper[4923]: I1128 11:23:27.254562 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-kj8cf" podStartSLOduration=2.04104995 podStartE2EDuration="10.254544054s" podCreationTimestamp="2025-11-28 11:23:17 +0000 UTC" firstStartedPulling="2025-11-28 11:23:18.553654443 +0000 UTC m=+877.682338693" lastFinishedPulling="2025-11-28 11:23:26.767148577 +0000 UTC m=+885.895832797" observedRunningTime="2025-11-28 11:23:27.251804027 +0000 UTC m=+886.380488257" watchObservedRunningTime="2025-11-28 11:23:27.254544054 +0000 UTC m=+886.383228264" Nov 28 11:23:27 crc kubenswrapper[4923]: I1128 11:23:27.608265 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-q6cvp"] Nov 28 11:23:27 crc kubenswrapper[4923]: I1128 11:23:27.610151 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-q6cvp" Nov 28 11:23:27 crc kubenswrapper[4923]: I1128 11:23:27.669803 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-q6cvp"] Nov 28 11:23:27 crc kubenswrapper[4923]: I1128 11:23:27.676984 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a9fbccbd-9e42-4378-9f99-c9df00ed11ff-catalog-content\") pod \"community-operators-q6cvp\" (UID: \"a9fbccbd-9e42-4378-9f99-c9df00ed11ff\") " pod="openshift-marketplace/community-operators-q6cvp" Nov 28 11:23:27 crc kubenswrapper[4923]: I1128 11:23:27.677062 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a9fbccbd-9e42-4378-9f99-c9df00ed11ff-utilities\") pod \"community-operators-q6cvp\" (UID: \"a9fbccbd-9e42-4378-9f99-c9df00ed11ff\") " pod="openshift-marketplace/community-operators-q6cvp" Nov 28 11:23:27 crc kubenswrapper[4923]: I1128 11:23:27.677122 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cfbc7\" (UniqueName: \"kubernetes.io/projected/a9fbccbd-9e42-4378-9f99-c9df00ed11ff-kube-api-access-cfbc7\") pod \"community-operators-q6cvp\" (UID: \"a9fbccbd-9e42-4378-9f99-c9df00ed11ff\") " pod="openshift-marketplace/community-operators-q6cvp" Nov 28 11:23:27 crc kubenswrapper[4923]: I1128 11:23:27.778561 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a9fbccbd-9e42-4378-9f99-c9df00ed11ff-catalog-content\") pod \"community-operators-q6cvp\" (UID: \"a9fbccbd-9e42-4378-9f99-c9df00ed11ff\") " pod="openshift-marketplace/community-operators-q6cvp" Nov 28 11:23:27 crc kubenswrapper[4923]: I1128 11:23:27.778654 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a9fbccbd-9e42-4378-9f99-c9df00ed11ff-utilities\") pod \"community-operators-q6cvp\" (UID: \"a9fbccbd-9e42-4378-9f99-c9df00ed11ff\") " pod="openshift-marketplace/community-operators-q6cvp" Nov 28 11:23:27 crc kubenswrapper[4923]: I1128 11:23:27.778691 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cfbc7\" (UniqueName: \"kubernetes.io/projected/a9fbccbd-9e42-4378-9f99-c9df00ed11ff-kube-api-access-cfbc7\") pod \"community-operators-q6cvp\" (UID: \"a9fbccbd-9e42-4378-9f99-c9df00ed11ff\") " pod="openshift-marketplace/community-operators-q6cvp" Nov 28 11:23:27 crc kubenswrapper[4923]: I1128 11:23:27.779126 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a9fbccbd-9e42-4378-9f99-c9df00ed11ff-catalog-content\") pod \"community-operators-q6cvp\" (UID: \"a9fbccbd-9e42-4378-9f99-c9df00ed11ff\") " pod="openshift-marketplace/community-operators-q6cvp" Nov 28 11:23:27 crc kubenswrapper[4923]: I1128 11:23:27.779205 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a9fbccbd-9e42-4378-9f99-c9df00ed11ff-utilities\") pod \"community-operators-q6cvp\" (UID: \"a9fbccbd-9e42-4378-9f99-c9df00ed11ff\") " pod="openshift-marketplace/community-operators-q6cvp" Nov 28 11:23:27 crc kubenswrapper[4923]: I1128 11:23:27.797322 4923 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-cfbc7\" (UniqueName: \"kubernetes.io/projected/a9fbccbd-9e42-4378-9f99-c9df00ed11ff-kube-api-access-cfbc7\") pod \"community-operators-q6cvp\" (UID: \"a9fbccbd-9e42-4378-9f99-c9df00ed11ff\") " pod="openshift-marketplace/community-operators-q6cvp" Nov 28 11:23:27 crc kubenswrapper[4923]: I1128 11:23:27.927858 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-q6cvp" Nov 28 11:23:28 crc kubenswrapper[4923]: I1128 11:23:28.204972 4923 generic.go:334] "Generic (PLEG): container finished" podID="fa72f0b5-6e71-4591-b569-5137c1176193" containerID="068ab1da5db3aca52adbc6740ad1311ef4628ab29226b7818b88d2e41998d084" exitCode=0 Nov 28 11:23:28 crc kubenswrapper[4923]: I1128 11:23:28.205032 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-ld8vz" event={"ID":"fa72f0b5-6e71-4591-b569-5137c1176193","Type":"ContainerDied","Data":"068ab1da5db3aca52adbc6740ad1311ef4628ab29226b7818b88d2e41998d084"} Nov 28 11:23:28 crc kubenswrapper[4923]: I1128 11:23:28.592777 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-q6cvp"] Nov 28 11:23:28 crc kubenswrapper[4923]: W1128 11:23:28.595599 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda9fbccbd_9e42_4378_9f99_c9df00ed11ff.slice/crio-d650cbe1e97a1b1900d21a011133f67ac0155639b7d15350ae957f252b64fc19 WatchSource:0}: Error finding container d650cbe1e97a1b1900d21a011133f67ac0155639b7d15350ae957f252b64fc19: Status 404 returned error can't find the container with id d650cbe1e97a1b1900d21a011133f67ac0155639b7d15350ae957f252b64fc19 Nov 28 11:23:29 crc kubenswrapper[4923]: I1128 11:23:29.213999 4923 generic.go:334] "Generic (PLEG): container finished" podID="a9fbccbd-9e42-4378-9f99-c9df00ed11ff" containerID="e7a869ed2dc9ba109a3fca3ad1b043fda0fc0e2615264d2ed02d4bd5b17630d5" exitCode=0 Nov 28 11:23:29 crc kubenswrapper[4923]: I1128 11:23:29.214065 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q6cvp" event={"ID":"a9fbccbd-9e42-4378-9f99-c9df00ed11ff","Type":"ContainerDied","Data":"e7a869ed2dc9ba109a3fca3ad1b043fda0fc0e2615264d2ed02d4bd5b17630d5"} Nov 28 11:23:29 crc kubenswrapper[4923]: I1128 11:23:29.214330 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q6cvp" event={"ID":"a9fbccbd-9e42-4378-9f99-c9df00ed11ff","Type":"ContainerStarted","Data":"d650cbe1e97a1b1900d21a011133f67ac0155639b7d15350ae957f252b64fc19"} Nov 28 11:23:29 crc kubenswrapper[4923]: I1128 11:23:29.220427 4923 generic.go:334] "Generic (PLEG): container finished" podID="fa72f0b5-6e71-4591-b569-5137c1176193" containerID="3b428fdc49dd94b7df757da3cebfb1d77d531f4f00f3c6355b6906d7e381fa85" exitCode=0 Nov 28 11:23:29 crc kubenswrapper[4923]: I1128 11:23:29.220486 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-ld8vz" event={"ID":"fa72f0b5-6e71-4591-b569-5137c1176193","Type":"ContainerDied","Data":"3b428fdc49dd94b7df757da3cebfb1d77d531f4f00f3c6355b6906d7e381fa85"} Nov 28 11:23:29 crc kubenswrapper[4923]: I1128 11:23:29.680620 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-tlcl5" Nov 28 11:23:30 crc kubenswrapper[4923]: I1128 11:23:30.228919 4923 generic.go:334] "Generic (PLEG): container finished" 
podID="a9fbccbd-9e42-4378-9f99-c9df00ed11ff" containerID="70a19dba24814602cfa055aef14f6c8958d0da5ee79f2562c2a278749b33e475" exitCode=0 Nov 28 11:23:30 crc kubenswrapper[4923]: I1128 11:23:30.229156 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q6cvp" event={"ID":"a9fbccbd-9e42-4378-9f99-c9df00ed11ff","Type":"ContainerDied","Data":"70a19dba24814602cfa055aef14f6c8958d0da5ee79f2562c2a278749b33e475"} Nov 28 11:23:30 crc kubenswrapper[4923]: I1128 11:23:30.238102 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-ld8vz" event={"ID":"fa72f0b5-6e71-4591-b569-5137c1176193","Type":"ContainerStarted","Data":"3783ef5e66d437522587b4e8df6ffa0d8ffcd3529964ddfb7262948eb5fc3add"} Nov 28 11:23:30 crc kubenswrapper[4923]: I1128 11:23:30.238135 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-ld8vz" event={"ID":"fa72f0b5-6e71-4591-b569-5137c1176193","Type":"ContainerStarted","Data":"abaf9e6ecf2103c6482346783a5614955cf98efbb027171dcbc98a57e2d5d966"} Nov 28 11:23:30 crc kubenswrapper[4923]: I1128 11:23:30.238144 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-ld8vz" event={"ID":"fa72f0b5-6e71-4591-b569-5137c1176193","Type":"ContainerStarted","Data":"9b6fc181fc5ef81ab58bf73d08e8fbfbdfa25b4e5732a66aef235786c9681f8f"} Nov 28 11:23:30 crc kubenswrapper[4923]: I1128 11:23:30.238154 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-ld8vz" event={"ID":"fa72f0b5-6e71-4591-b569-5137c1176193","Type":"ContainerStarted","Data":"27155779f4def20a0bbdc266ee8ee728ac8267690ce727a26be889b512839cdb"} Nov 28 11:23:30 crc kubenswrapper[4923]: I1128 11:23:30.238163 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-ld8vz" event={"ID":"fa72f0b5-6e71-4591-b569-5137c1176193","Type":"ContainerStarted","Data":"43b5e8c2a3a98a8cd443820df35246cacef3e8d47ff0783f1b805564c2c3ef2e"} Nov 28 11:23:31 crc kubenswrapper[4923]: I1128 11:23:31.249602 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q6cvp" event={"ID":"a9fbccbd-9e42-4378-9f99-c9df00ed11ff","Type":"ContainerStarted","Data":"c40d482b85b162cb5f6ac611c3a838380bc88db9b7215976731dcf8286a0f6b4"} Nov 28 11:23:31 crc kubenswrapper[4923]: I1128 11:23:31.254683 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-ld8vz" event={"ID":"fa72f0b5-6e71-4591-b569-5137c1176193","Type":"ContainerStarted","Data":"b14d655b189aa1a633fa805d2ed27d3edb80ba0b12307b1a8c8acf6ec02d8021"} Nov 28 11:23:31 crc kubenswrapper[4923]: I1128 11:23:31.255676 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-ld8vz" Nov 28 11:23:31 crc kubenswrapper[4923]: I1128 11:23:31.276056 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-q6cvp" podStartSLOduration=2.8123699159999997 podStartE2EDuration="4.276043417s" podCreationTimestamp="2025-11-28 11:23:27 +0000 UTC" firstStartedPulling="2025-11-28 11:23:29.216629721 +0000 UTC m=+888.345313971" lastFinishedPulling="2025-11-28 11:23:30.680303222 +0000 UTC m=+889.808987472" observedRunningTime="2025-11-28 11:23:31.271920421 +0000 UTC m=+890.400604641" watchObservedRunningTime="2025-11-28 11:23:31.276043417 +0000 UTC m=+890.404727637" Nov 28 11:23:31 crc kubenswrapper[4923]: I1128 11:23:31.315311 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="metallb-system/frr-k8s-ld8vz" podStartSLOduration=6.364540581 podStartE2EDuration="14.315288582s" podCreationTimestamp="2025-11-28 11:23:17 +0000 UTC" firstStartedPulling="2025-11-28 11:23:18.780495246 +0000 UTC m=+877.909179486" lastFinishedPulling="2025-11-28 11:23:26.731243267 +0000 UTC m=+885.859927487" observedRunningTime="2025-11-28 11:23:31.307511813 +0000 UTC m=+890.436196043" watchObservedRunningTime="2025-11-28 11:23:31.315288582 +0000 UTC m=+890.443972832" Nov 28 11:23:33 crc kubenswrapper[4923]: I1128 11:23:33.648351 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-ld8vz" Nov 28 11:23:33 crc kubenswrapper[4923]: I1128 11:23:33.710234 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-ld8vz" Nov 28 11:23:35 crc kubenswrapper[4923]: I1128 11:23:35.383675 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-gtnfh"] Nov 28 11:23:35 crc kubenswrapper[4923]: I1128 11:23:35.385118 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-gtnfh" Nov 28 11:23:35 crc kubenswrapper[4923]: I1128 11:23:35.388844 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt" Nov 28 11:23:35 crc kubenswrapper[4923]: I1128 11:23:35.389218 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-kjrtz" Nov 28 11:23:35 crc kubenswrapper[4923]: I1128 11:23:35.389703 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt" Nov 28 11:23:35 crc kubenswrapper[4923]: I1128 11:23:35.397259 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-gtnfh"] Nov 28 11:23:35 crc kubenswrapper[4923]: I1128 11:23:35.519860 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-928sm\" (UniqueName: \"kubernetes.io/projected/2c55fa30-52ca-462f-871b-85851e250a99-kube-api-access-928sm\") pod \"openstack-operator-index-gtnfh\" (UID: \"2c55fa30-52ca-462f-871b-85851e250a99\") " pod="openstack-operators/openstack-operator-index-gtnfh" Nov 28 11:23:35 crc kubenswrapper[4923]: I1128 11:23:35.622736 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-928sm\" (UniqueName: \"kubernetes.io/projected/2c55fa30-52ca-462f-871b-85851e250a99-kube-api-access-928sm\") pod \"openstack-operator-index-gtnfh\" (UID: \"2c55fa30-52ca-462f-871b-85851e250a99\") " pod="openstack-operators/openstack-operator-index-gtnfh" Nov 28 11:23:35 crc kubenswrapper[4923]: I1128 11:23:35.658437 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-928sm\" (UniqueName: \"kubernetes.io/projected/2c55fa30-52ca-462f-871b-85851e250a99-kube-api-access-928sm\") pod \"openstack-operator-index-gtnfh\" (UID: \"2c55fa30-52ca-462f-871b-85851e250a99\") " pod="openstack-operators/openstack-operator-index-gtnfh" Nov 28 11:23:35 crc kubenswrapper[4923]: I1128 11:23:35.709564 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-gtnfh" Nov 28 11:23:36 crc kubenswrapper[4923]: I1128 11:23:36.000562 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-gtnfh"] Nov 28 11:23:36 crc kubenswrapper[4923]: I1128 11:23:36.286954 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-gtnfh" event={"ID":"2c55fa30-52ca-462f-871b-85851e250a99","Type":"ContainerStarted","Data":"848ba12c69a9eacbe11ea3490ae46184ddc68ab2e2cfb97289128fabc175b9ac"} Nov 28 11:23:37 crc kubenswrapper[4923]: I1128 11:23:37.928863 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-q6cvp" Nov 28 11:23:37 crc kubenswrapper[4923]: I1128 11:23:37.929189 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-q6cvp" Nov 28 11:23:37 crc kubenswrapper[4923]: I1128 11:23:37.985765 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-q6cvp" Nov 28 11:23:38 crc kubenswrapper[4923]: I1128 11:23:38.069477 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-kj8cf" Nov 28 11:23:38 crc kubenswrapper[4923]: I1128 11:23:38.337166 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-q6cvp" Nov 28 11:23:38 crc kubenswrapper[4923]: I1128 11:23:38.818121 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-f8648f98b-42ftq" Nov 28 11:23:39 crc kubenswrapper[4923]: I1128 11:23:39.305423 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-gtnfh" event={"ID":"2c55fa30-52ca-462f-871b-85851e250a99","Type":"ContainerStarted","Data":"c79233e96269a5b016b3ba93aefa81e8ddef57c5d6a0d099a627680b9a56530a"} Nov 28 11:23:39 crc kubenswrapper[4923]: I1128 11:23:39.580144 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-gtnfh"] Nov 28 11:23:40 crc kubenswrapper[4923]: I1128 11:23:40.192384 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-nmwq5"] Nov 28 11:23:40 crc kubenswrapper[4923]: I1128 11:23:40.193615 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-nmwq5" Nov 28 11:23:40 crc kubenswrapper[4923]: I1128 11:23:40.205584 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c4xwp\" (UniqueName: \"kubernetes.io/projected/7493be0a-26b4-4c54-b0dd-456f39fe357e-kube-api-access-c4xwp\") pod \"openstack-operator-index-nmwq5\" (UID: \"7493be0a-26b4-4c54-b0dd-456f39fe357e\") " pod="openstack-operators/openstack-operator-index-nmwq5" Nov 28 11:23:40 crc kubenswrapper[4923]: I1128 11:23:40.211170 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-nmwq5"] Nov 28 11:23:40 crc kubenswrapper[4923]: I1128 11:23:40.306626 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c4xwp\" (UniqueName: \"kubernetes.io/projected/7493be0a-26b4-4c54-b0dd-456f39fe357e-kube-api-access-c4xwp\") pod \"openstack-operator-index-nmwq5\" (UID: \"7493be0a-26b4-4c54-b0dd-456f39fe357e\") " pod="openstack-operators/openstack-operator-index-nmwq5" Nov 28 11:23:40 crc kubenswrapper[4923]: I1128 11:23:40.313960 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/openstack-operator-index-gtnfh" podUID="2c55fa30-52ca-462f-871b-85851e250a99" containerName="registry-server" containerID="cri-o://c79233e96269a5b016b3ba93aefa81e8ddef57c5d6a0d099a627680b9a56530a" gracePeriod=2 Nov 28 11:23:40 crc kubenswrapper[4923]: I1128 11:23:40.340319 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c4xwp\" (UniqueName: \"kubernetes.io/projected/7493be0a-26b4-4c54-b0dd-456f39fe357e-kube-api-access-c4xwp\") pod \"openstack-operator-index-nmwq5\" (UID: \"7493be0a-26b4-4c54-b0dd-456f39fe357e\") " pod="openstack-operators/openstack-operator-index-nmwq5" Nov 28 11:23:40 crc kubenswrapper[4923]: I1128 11:23:40.348914 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-gtnfh" podStartSLOduration=2.588373489 podStartE2EDuration="5.348883046s" podCreationTimestamp="2025-11-28 11:23:35 +0000 UTC" firstStartedPulling="2025-11-28 11:23:36.010607468 +0000 UTC m=+895.139291688" lastFinishedPulling="2025-11-28 11:23:38.771117025 +0000 UTC m=+897.899801245" observedRunningTime="2025-11-28 11:23:40.33977526 +0000 UTC m=+899.468459500" watchObservedRunningTime="2025-11-28 11:23:40.348883046 +0000 UTC m=+899.477567286" Nov 28 11:23:40 crc kubenswrapper[4923]: I1128 11:23:40.571906 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-nmwq5" Nov 28 11:23:40 crc kubenswrapper[4923]: I1128 11:23:40.707779 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-gtnfh" Nov 28 11:23:40 crc kubenswrapper[4923]: I1128 11:23:40.712374 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-928sm\" (UniqueName: \"kubernetes.io/projected/2c55fa30-52ca-462f-871b-85851e250a99-kube-api-access-928sm\") pod \"2c55fa30-52ca-462f-871b-85851e250a99\" (UID: \"2c55fa30-52ca-462f-871b-85851e250a99\") " Nov 28 11:23:40 crc kubenswrapper[4923]: I1128 11:23:40.717667 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2c55fa30-52ca-462f-871b-85851e250a99-kube-api-access-928sm" (OuterVolumeSpecName: "kube-api-access-928sm") pod "2c55fa30-52ca-462f-871b-85851e250a99" (UID: "2c55fa30-52ca-462f-871b-85851e250a99"). InnerVolumeSpecName "kube-api-access-928sm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:23:40 crc kubenswrapper[4923]: I1128 11:23:40.813809 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-928sm\" (UniqueName: \"kubernetes.io/projected/2c55fa30-52ca-462f-871b-85851e250a99-kube-api-access-928sm\") on node \"crc\" DevicePath \"\"" Nov 28 11:23:41 crc kubenswrapper[4923]: I1128 11:23:41.013060 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-nmwq5"] Nov 28 11:23:41 crc kubenswrapper[4923]: W1128 11:23:41.022378 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7493be0a_26b4_4c54_b0dd_456f39fe357e.slice/crio-276fd56f2321106349bd9c83835b001bce82b1eb258dc5c4ca521021b4fb90a5 WatchSource:0}: Error finding container 276fd56f2321106349bd9c83835b001bce82b1eb258dc5c4ca521021b4fb90a5: Status 404 returned error can't find the container with id 276fd56f2321106349bd9c83835b001bce82b1eb258dc5c4ca521021b4fb90a5 Nov 28 11:23:41 crc kubenswrapper[4923]: I1128 11:23:41.329267 4923 generic.go:334] "Generic (PLEG): container finished" podID="2c55fa30-52ca-462f-871b-85851e250a99" containerID="c79233e96269a5b016b3ba93aefa81e8ddef57c5d6a0d099a627680b9a56530a" exitCode=0 Nov 28 11:23:41 crc kubenswrapper[4923]: I1128 11:23:41.329377 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-gtnfh" Nov 28 11:23:41 crc kubenswrapper[4923]: I1128 11:23:41.329413 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-gtnfh" event={"ID":"2c55fa30-52ca-462f-871b-85851e250a99","Type":"ContainerDied","Data":"c79233e96269a5b016b3ba93aefa81e8ddef57c5d6a0d099a627680b9a56530a"} Nov 28 11:23:41 crc kubenswrapper[4923]: I1128 11:23:41.330200 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-gtnfh" event={"ID":"2c55fa30-52ca-462f-871b-85851e250a99","Type":"ContainerDied","Data":"848ba12c69a9eacbe11ea3490ae46184ddc68ab2e2cfb97289128fabc175b9ac"} Nov 28 11:23:41 crc kubenswrapper[4923]: I1128 11:23:41.330238 4923 scope.go:117] "RemoveContainer" containerID="c79233e96269a5b016b3ba93aefa81e8ddef57c5d6a0d099a627680b9a56530a" Nov 28 11:23:41 crc kubenswrapper[4923]: I1128 11:23:41.333129 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-nmwq5" event={"ID":"7493be0a-26b4-4c54-b0dd-456f39fe357e","Type":"ContainerStarted","Data":"276fd56f2321106349bd9c83835b001bce82b1eb258dc5c4ca521021b4fb90a5"} Nov 28 11:23:41 crc kubenswrapper[4923]: I1128 11:23:41.368206 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-gtnfh"] Nov 28 11:23:41 crc kubenswrapper[4923]: I1128 11:23:41.387195 4923 scope.go:117] "RemoveContainer" containerID="c79233e96269a5b016b3ba93aefa81e8ddef57c5d6a0d099a627680b9a56530a" Nov 28 11:23:41 crc kubenswrapper[4923]: I1128 11:23:41.387634 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/openstack-operator-index-gtnfh"] Nov 28 11:23:41 crc kubenswrapper[4923]: E1128 11:23:41.387810 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c79233e96269a5b016b3ba93aefa81e8ddef57c5d6a0d099a627680b9a56530a\": container with ID starting with c79233e96269a5b016b3ba93aefa81e8ddef57c5d6a0d099a627680b9a56530a not found: ID does not exist" containerID="c79233e96269a5b016b3ba93aefa81e8ddef57c5d6a0d099a627680b9a56530a" Nov 28 11:23:41 crc kubenswrapper[4923]: I1128 11:23:41.387875 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c79233e96269a5b016b3ba93aefa81e8ddef57c5d6a0d099a627680b9a56530a"} err="failed to get container status \"c79233e96269a5b016b3ba93aefa81e8ddef57c5d6a0d099a627680b9a56530a\": rpc error: code = NotFound desc = could not find container \"c79233e96269a5b016b3ba93aefa81e8ddef57c5d6a0d099a627680b9a56530a\": container with ID starting with c79233e96269a5b016b3ba93aefa81e8ddef57c5d6a0d099a627680b9a56530a not found: ID does not exist" Nov 28 11:23:41 crc kubenswrapper[4923]: I1128 11:23:41.780097 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-q6cvp"] Nov 28 11:23:41 crc kubenswrapper[4923]: I1128 11:23:41.780441 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-q6cvp" podUID="a9fbccbd-9e42-4378-9f99-c9df00ed11ff" containerName="registry-server" containerID="cri-o://c40d482b85b162cb5f6ac611c3a838380bc88db9b7215976731dcf8286a0f6b4" gracePeriod=2 Nov 28 11:23:42 crc kubenswrapper[4923]: I1128 11:23:42.234799 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-q6cvp" Nov 28 11:23:42 crc kubenswrapper[4923]: I1128 11:23:42.339470 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbc7\" (UniqueName: \"kubernetes.io/projected/a9fbccbd-9e42-4378-9f99-c9df00ed11ff-kube-api-access-cfbc7\") pod \"a9fbccbd-9e42-4378-9f99-c9df00ed11ff\" (UID: \"a9fbccbd-9e42-4378-9f99-c9df00ed11ff\") " Nov 28 11:23:42 crc kubenswrapper[4923]: I1128 11:23:42.339589 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a9fbccbd-9e42-4378-9f99-c9df00ed11ff-catalog-content\") pod \"a9fbccbd-9e42-4378-9f99-c9df00ed11ff\" (UID: \"a9fbccbd-9e42-4378-9f99-c9df00ed11ff\") " Nov 28 11:23:42 crc kubenswrapper[4923]: I1128 11:23:42.339645 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a9fbccbd-9e42-4378-9f99-c9df00ed11ff-utilities\") pod \"a9fbccbd-9e42-4378-9f99-c9df00ed11ff\" (UID: \"a9fbccbd-9e42-4378-9f99-c9df00ed11ff\") " Nov 28 11:23:42 crc kubenswrapper[4923]: I1128 11:23:42.340946 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a9fbccbd-9e42-4378-9f99-c9df00ed11ff-utilities" (OuterVolumeSpecName: "utilities") pod "a9fbccbd-9e42-4378-9f99-c9df00ed11ff" (UID: "a9fbccbd-9e42-4378-9f99-c9df00ed11ff"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:23:42 crc kubenswrapper[4923]: I1128 11:23:42.344265 4923 generic.go:334] "Generic (PLEG): container finished" podID="a9fbccbd-9e42-4378-9f99-c9df00ed11ff" containerID="c40d482b85b162cb5f6ac611c3a838380bc88db9b7215976731dcf8286a0f6b4" exitCode=0 Nov 28 11:23:42 crc kubenswrapper[4923]: I1128 11:23:42.344333 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q6cvp" event={"ID":"a9fbccbd-9e42-4378-9f99-c9df00ed11ff","Type":"ContainerDied","Data":"c40d482b85b162cb5f6ac611c3a838380bc88db9b7215976731dcf8286a0f6b4"} Nov 28 11:23:42 crc kubenswrapper[4923]: I1128 11:23:42.344366 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q6cvp" event={"ID":"a9fbccbd-9e42-4378-9f99-c9df00ed11ff","Type":"ContainerDied","Data":"d650cbe1e97a1b1900d21a011133f67ac0155639b7d15350ae957f252b64fc19"} Nov 28 11:23:42 crc kubenswrapper[4923]: I1128 11:23:42.344387 4923 scope.go:117] "RemoveContainer" containerID="c40d482b85b162cb5f6ac611c3a838380bc88db9b7215976731dcf8286a0f6b4" Nov 28 11:23:42 crc kubenswrapper[4923]: I1128 11:23:42.344513 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-q6cvp" Nov 28 11:23:42 crc kubenswrapper[4923]: I1128 11:23:42.347955 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a9fbccbd-9e42-4378-9f99-c9df00ed11ff-kube-api-access-cfbc7" (OuterVolumeSpecName: "kube-api-access-cfbc7") pod "a9fbccbd-9e42-4378-9f99-c9df00ed11ff" (UID: "a9fbccbd-9e42-4378-9f99-c9df00ed11ff"). InnerVolumeSpecName "kube-api-access-cfbc7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:23:42 crc kubenswrapper[4923]: I1128 11:23:42.349904 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-nmwq5" event={"ID":"7493be0a-26b4-4c54-b0dd-456f39fe357e","Type":"ContainerStarted","Data":"3dde429493597bfc0bf05420f850ce025d918fcdae6499f8556dc907210bea3b"} Nov 28 11:23:42 crc kubenswrapper[4923]: I1128 11:23:42.374972 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-nmwq5" podStartSLOduration=2.187790368 podStartE2EDuration="2.374922774s" podCreationTimestamp="2025-11-28 11:23:40 +0000 UTC" firstStartedPulling="2025-11-28 11:23:41.028342468 +0000 UTC m=+900.157026688" lastFinishedPulling="2025-11-28 11:23:41.215474894 +0000 UTC m=+900.344159094" observedRunningTime="2025-11-28 11:23:42.365414626 +0000 UTC m=+901.494098856" watchObservedRunningTime="2025-11-28 11:23:42.374922774 +0000 UTC m=+901.503607014" Nov 28 11:23:42 crc kubenswrapper[4923]: I1128 11:23:42.386030 4923 scope.go:117] "RemoveContainer" containerID="70a19dba24814602cfa055aef14f6c8958d0da5ee79f2562c2a278749b33e475" Nov 28 11:23:42 crc kubenswrapper[4923]: I1128 11:23:42.409183 4923 scope.go:117] "RemoveContainer" containerID="e7a869ed2dc9ba109a3fca3ad1b043fda0fc0e2615264d2ed02d4bd5b17630d5" Nov 28 11:23:42 crc kubenswrapper[4923]: I1128 11:23:42.418845 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a9fbccbd-9e42-4378-9f99-c9df00ed11ff-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a9fbccbd-9e42-4378-9f99-c9df00ed11ff" (UID: "a9fbccbd-9e42-4378-9f99-c9df00ed11ff"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:23:42 crc kubenswrapper[4923]: I1128 11:23:42.421831 4923 scope.go:117] "RemoveContainer" containerID="c40d482b85b162cb5f6ac611c3a838380bc88db9b7215976731dcf8286a0f6b4" Nov 28 11:23:42 crc kubenswrapper[4923]: E1128 11:23:42.422279 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c40d482b85b162cb5f6ac611c3a838380bc88db9b7215976731dcf8286a0f6b4\": container with ID starting with c40d482b85b162cb5f6ac611c3a838380bc88db9b7215976731dcf8286a0f6b4 not found: ID does not exist" containerID="c40d482b85b162cb5f6ac611c3a838380bc88db9b7215976731dcf8286a0f6b4" Nov 28 11:23:42 crc kubenswrapper[4923]: I1128 11:23:42.422315 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c40d482b85b162cb5f6ac611c3a838380bc88db9b7215976731dcf8286a0f6b4"} err="failed to get container status \"c40d482b85b162cb5f6ac611c3a838380bc88db9b7215976731dcf8286a0f6b4\": rpc error: code = NotFound desc = could not find container \"c40d482b85b162cb5f6ac611c3a838380bc88db9b7215976731dcf8286a0f6b4\": container with ID starting with c40d482b85b162cb5f6ac611c3a838380bc88db9b7215976731dcf8286a0f6b4 not found: ID does not exist" Nov 28 11:23:42 crc kubenswrapper[4923]: I1128 11:23:42.422336 4923 scope.go:117] "RemoveContainer" containerID="70a19dba24814602cfa055aef14f6c8958d0da5ee79f2562c2a278749b33e475" Nov 28 11:23:42 crc kubenswrapper[4923]: E1128 11:23:42.422608 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"70a19dba24814602cfa055aef14f6c8958d0da5ee79f2562c2a278749b33e475\": container with ID starting with 
70a19dba24814602cfa055aef14f6c8958d0da5ee79f2562c2a278749b33e475 not found: ID does not exist" containerID="70a19dba24814602cfa055aef14f6c8958d0da5ee79f2562c2a278749b33e475" Nov 28 11:23:42 crc kubenswrapper[4923]: I1128 11:23:42.422642 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"70a19dba24814602cfa055aef14f6c8958d0da5ee79f2562c2a278749b33e475"} err="failed to get container status \"70a19dba24814602cfa055aef14f6c8958d0da5ee79f2562c2a278749b33e475\": rpc error: code = NotFound desc = could not find container \"70a19dba24814602cfa055aef14f6c8958d0da5ee79f2562c2a278749b33e475\": container with ID starting with 70a19dba24814602cfa055aef14f6c8958d0da5ee79f2562c2a278749b33e475 not found: ID does not exist" Nov 28 11:23:42 crc kubenswrapper[4923]: I1128 11:23:42.422669 4923 scope.go:117] "RemoveContainer" containerID="e7a869ed2dc9ba109a3fca3ad1b043fda0fc0e2615264d2ed02d4bd5b17630d5" Nov 28 11:23:42 crc kubenswrapper[4923]: E1128 11:23:42.423075 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e7a869ed2dc9ba109a3fca3ad1b043fda0fc0e2615264d2ed02d4bd5b17630d5\": container with ID starting with e7a869ed2dc9ba109a3fca3ad1b043fda0fc0e2615264d2ed02d4bd5b17630d5 not found: ID does not exist" containerID="e7a869ed2dc9ba109a3fca3ad1b043fda0fc0e2615264d2ed02d4bd5b17630d5" Nov 28 11:23:42 crc kubenswrapper[4923]: I1128 11:23:42.423094 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e7a869ed2dc9ba109a3fca3ad1b043fda0fc0e2615264d2ed02d4bd5b17630d5"} err="failed to get container status \"e7a869ed2dc9ba109a3fca3ad1b043fda0fc0e2615264d2ed02d4bd5b17630d5\": rpc error: code = NotFound desc = could not find container \"e7a869ed2dc9ba109a3fca3ad1b043fda0fc0e2615264d2ed02d4bd5b17630d5\": container with ID starting with e7a869ed2dc9ba109a3fca3ad1b043fda0fc0e2615264d2ed02d4bd5b17630d5 not found: ID does not exist" Nov 28 11:23:42 crc kubenswrapper[4923]: I1128 11:23:42.441293 4923 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a9fbccbd-9e42-4378-9f99-c9df00ed11ff-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 11:23:42 crc kubenswrapper[4923]: I1128 11:23:42.441340 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbc7\" (UniqueName: \"kubernetes.io/projected/a9fbccbd-9e42-4378-9f99-c9df00ed11ff-kube-api-access-cfbc7\") on node \"crc\" DevicePath \"\"" Nov 28 11:23:42 crc kubenswrapper[4923]: I1128 11:23:42.441362 4923 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a9fbccbd-9e42-4378-9f99-c9df00ed11ff-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 11:23:42 crc kubenswrapper[4923]: I1128 11:23:42.678640 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-q6cvp"] Nov 28 11:23:42 crc kubenswrapper[4923]: I1128 11:23:42.683442 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-q6cvp"] Nov 28 11:23:43 crc kubenswrapper[4923]: I1128 11:23:43.184439 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2c55fa30-52ca-462f-871b-85851e250a99" path="/var/lib/kubelet/pods/2c55fa30-52ca-462f-871b-85851e250a99/volumes" Nov 28 11:23:43 crc kubenswrapper[4923]: I1128 11:23:43.185716 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="a9fbccbd-9e42-4378-9f99-c9df00ed11ff" path="/var/lib/kubelet/pods/a9fbccbd-9e42-4378-9f99-c9df00ed11ff/volumes" Nov 28 11:23:43 crc kubenswrapper[4923]: E1128 11:23:43.631155 4923 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2c55fa30_52ca_462f_871b_85851e250a99.slice/crio-848ba12c69a9eacbe11ea3490ae46184ddc68ab2e2cfb97289128fabc175b9ac\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2c55fa30_52ca_462f_871b_85851e250a99.slice\": RecentStats: unable to find data in memory cache]" Nov 28 11:23:44 crc kubenswrapper[4923]: I1128 11:23:44.026917 4923 patch_prober.go:28] interesting pod/machine-config-daemon-bwdth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 11:23:44 crc kubenswrapper[4923]: I1128 11:23:44.027062 4923 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 11:23:46 crc kubenswrapper[4923]: I1128 11:23:46.191798 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-kxz7v"] Nov 28 11:23:46 crc kubenswrapper[4923]: E1128 11:23:46.192671 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a9fbccbd-9e42-4378-9f99-c9df00ed11ff" containerName="extract-utilities" Nov 28 11:23:46 crc kubenswrapper[4923]: I1128 11:23:46.192689 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="a9fbccbd-9e42-4378-9f99-c9df00ed11ff" containerName="extract-utilities" Nov 28 11:23:46 crc kubenswrapper[4923]: E1128 11:23:46.192714 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a9fbccbd-9e42-4378-9f99-c9df00ed11ff" containerName="registry-server" Nov 28 11:23:46 crc kubenswrapper[4923]: I1128 11:23:46.192722 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="a9fbccbd-9e42-4378-9f99-c9df00ed11ff" containerName="registry-server" Nov 28 11:23:46 crc kubenswrapper[4923]: E1128 11:23:46.192742 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a9fbccbd-9e42-4378-9f99-c9df00ed11ff" containerName="extract-content" Nov 28 11:23:46 crc kubenswrapper[4923]: I1128 11:23:46.192750 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="a9fbccbd-9e42-4378-9f99-c9df00ed11ff" containerName="extract-content" Nov 28 11:23:46 crc kubenswrapper[4923]: E1128 11:23:46.192765 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c55fa30-52ca-462f-871b-85851e250a99" containerName="registry-server" Nov 28 11:23:46 crc kubenswrapper[4923]: I1128 11:23:46.192773 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c55fa30-52ca-462f-871b-85851e250a99" containerName="registry-server" Nov 28 11:23:46 crc kubenswrapper[4923]: I1128 11:23:46.192950 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="a9fbccbd-9e42-4378-9f99-c9df00ed11ff" containerName="registry-server" Nov 28 11:23:46 crc kubenswrapper[4923]: I1128 11:23:46.192975 4923 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="2c55fa30-52ca-462f-871b-85851e250a99" containerName="registry-server" Nov 28 11:23:46 crc kubenswrapper[4923]: I1128 11:23:46.194030 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-kxz7v" Nov 28 11:23:46 crc kubenswrapper[4923]: I1128 11:23:46.204219 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-kxz7v"] Nov 28 11:23:46 crc kubenswrapper[4923]: I1128 11:23:46.303736 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6e7e489c-df5a-4aae-bf95-68ee1dc8a47d-catalog-content\") pod \"redhat-marketplace-kxz7v\" (UID: \"6e7e489c-df5a-4aae-bf95-68ee1dc8a47d\") " pod="openshift-marketplace/redhat-marketplace-kxz7v" Nov 28 11:23:46 crc kubenswrapper[4923]: I1128 11:23:46.303785 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6e7e489c-df5a-4aae-bf95-68ee1dc8a47d-utilities\") pod \"redhat-marketplace-kxz7v\" (UID: \"6e7e489c-df5a-4aae-bf95-68ee1dc8a47d\") " pod="openshift-marketplace/redhat-marketplace-kxz7v" Nov 28 11:23:46 crc kubenswrapper[4923]: I1128 11:23:46.303920 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5n4xh\" (UniqueName: \"kubernetes.io/projected/6e7e489c-df5a-4aae-bf95-68ee1dc8a47d-kube-api-access-5n4xh\") pod \"redhat-marketplace-kxz7v\" (UID: \"6e7e489c-df5a-4aae-bf95-68ee1dc8a47d\") " pod="openshift-marketplace/redhat-marketplace-kxz7v" Nov 28 11:23:46 crc kubenswrapper[4923]: I1128 11:23:46.405793 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6e7e489c-df5a-4aae-bf95-68ee1dc8a47d-catalog-content\") pod \"redhat-marketplace-kxz7v\" (UID: \"6e7e489c-df5a-4aae-bf95-68ee1dc8a47d\") " pod="openshift-marketplace/redhat-marketplace-kxz7v" Nov 28 11:23:46 crc kubenswrapper[4923]: I1128 11:23:46.405846 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6e7e489c-df5a-4aae-bf95-68ee1dc8a47d-utilities\") pod \"redhat-marketplace-kxz7v\" (UID: \"6e7e489c-df5a-4aae-bf95-68ee1dc8a47d\") " pod="openshift-marketplace/redhat-marketplace-kxz7v" Nov 28 11:23:46 crc kubenswrapper[4923]: I1128 11:23:46.406414 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6e7e489c-df5a-4aae-bf95-68ee1dc8a47d-utilities\") pod \"redhat-marketplace-kxz7v\" (UID: \"6e7e489c-df5a-4aae-bf95-68ee1dc8a47d\") " pod="openshift-marketplace/redhat-marketplace-kxz7v" Nov 28 11:23:46 crc kubenswrapper[4923]: I1128 11:23:46.406548 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5n4xh\" (UniqueName: \"kubernetes.io/projected/6e7e489c-df5a-4aae-bf95-68ee1dc8a47d-kube-api-access-5n4xh\") pod \"redhat-marketplace-kxz7v\" (UID: \"6e7e489c-df5a-4aae-bf95-68ee1dc8a47d\") " pod="openshift-marketplace/redhat-marketplace-kxz7v" Nov 28 11:23:46 crc kubenswrapper[4923]: I1128 11:23:46.406618 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6e7e489c-df5a-4aae-bf95-68ee1dc8a47d-catalog-content\") pod \"redhat-marketplace-kxz7v\" (UID: 
\"6e7e489c-df5a-4aae-bf95-68ee1dc8a47d\") " pod="openshift-marketplace/redhat-marketplace-kxz7v" Nov 28 11:23:46 crc kubenswrapper[4923]: I1128 11:23:46.440687 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5n4xh\" (UniqueName: \"kubernetes.io/projected/6e7e489c-df5a-4aae-bf95-68ee1dc8a47d-kube-api-access-5n4xh\") pod \"redhat-marketplace-kxz7v\" (UID: \"6e7e489c-df5a-4aae-bf95-68ee1dc8a47d\") " pod="openshift-marketplace/redhat-marketplace-kxz7v" Nov 28 11:23:46 crc kubenswrapper[4923]: I1128 11:23:46.516811 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-kxz7v" Nov 28 11:23:46 crc kubenswrapper[4923]: I1128 11:23:46.924134 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-kxz7v"] Nov 28 11:23:47 crc kubenswrapper[4923]: I1128 11:23:47.389754 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kxz7v" event={"ID":"6e7e489c-df5a-4aae-bf95-68ee1dc8a47d","Type":"ContainerStarted","Data":"f11a46e34c0180c31b973bdb46b2e4e36b0111755f786f21b6acd9be25dd21f0"} Nov 28 11:23:48 crc kubenswrapper[4923]: I1128 11:23:48.398576 4923 generic.go:334] "Generic (PLEG): container finished" podID="6e7e489c-df5a-4aae-bf95-68ee1dc8a47d" containerID="ffc014bfc62d7a3aff3f4d3b3468d640bcf335748ca589271f726ee9406ac3f6" exitCode=0 Nov 28 11:23:48 crc kubenswrapper[4923]: I1128 11:23:48.398652 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kxz7v" event={"ID":"6e7e489c-df5a-4aae-bf95-68ee1dc8a47d","Type":"ContainerDied","Data":"ffc014bfc62d7a3aff3f4d3b3468d640bcf335748ca589271f726ee9406ac3f6"} Nov 28 11:23:48 crc kubenswrapper[4923]: I1128 11:23:48.652096 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-ld8vz" Nov 28 11:23:49 crc kubenswrapper[4923]: I1128 11:23:49.408923 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kxz7v" event={"ID":"6e7e489c-df5a-4aae-bf95-68ee1dc8a47d","Type":"ContainerStarted","Data":"c31a6c1009dcb9165222e0c93f5a6018933cfbc13404281fa57c0cb1c56bd4b5"} Nov 28 11:23:50 crc kubenswrapper[4923]: I1128 11:23:50.421764 4923 generic.go:334] "Generic (PLEG): container finished" podID="6e7e489c-df5a-4aae-bf95-68ee1dc8a47d" containerID="c31a6c1009dcb9165222e0c93f5a6018933cfbc13404281fa57c0cb1c56bd4b5" exitCode=0 Nov 28 11:23:50 crc kubenswrapper[4923]: I1128 11:23:50.421838 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kxz7v" event={"ID":"6e7e489c-df5a-4aae-bf95-68ee1dc8a47d","Type":"ContainerDied","Data":"c31a6c1009dcb9165222e0c93f5a6018933cfbc13404281fa57c0cb1c56bd4b5"} Nov 28 11:23:50 crc kubenswrapper[4923]: I1128 11:23:50.572857 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-index-nmwq5" Nov 28 11:23:50 crc kubenswrapper[4923]: I1128 11:23:50.573228 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/openstack-operator-index-nmwq5" Nov 28 11:23:50 crc kubenswrapper[4923]: I1128 11:23:50.620497 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/openstack-operator-index-nmwq5" Nov 28 11:23:51 crc kubenswrapper[4923]: I1128 11:23:51.431727 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-marketplace-kxz7v" event={"ID":"6e7e489c-df5a-4aae-bf95-68ee1dc8a47d","Type":"ContainerStarted","Data":"d9a0f0550e949cdfb9f4bb79e2109cf2dfb1005a97c9dbadfb6bb23265a681e1"} Nov 28 11:23:51 crc kubenswrapper[4923]: I1128 11:23:51.453636 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-kxz7v" podStartSLOduration=2.892345288 podStartE2EDuration="5.453616057s" podCreationTimestamp="2025-11-28 11:23:46 +0000 UTC" firstStartedPulling="2025-11-28 11:23:48.400513657 +0000 UTC m=+907.529197887" lastFinishedPulling="2025-11-28 11:23:50.961784416 +0000 UTC m=+910.090468656" observedRunningTime="2025-11-28 11:23:51.452517186 +0000 UTC m=+910.581201446" watchObservedRunningTime="2025-11-28 11:23:51.453616057 +0000 UTC m=+910.582300297" Nov 28 11:23:51 crc kubenswrapper[4923]: I1128 11:23:51.484591 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-index-nmwq5" Nov 28 11:23:53 crc kubenswrapper[4923]: I1128 11:23:53.635241 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/407a49296837c8c1ba2ba3d7e1a48e4734f20cac0b622a348cb10970b8zbzwl"] Nov 28 11:23:53 crc kubenswrapper[4923]: I1128 11:23:53.637817 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/407a49296837c8c1ba2ba3d7e1a48e4734f20cac0b622a348cb10970b8zbzwl" Nov 28 11:23:53 crc kubenswrapper[4923]: I1128 11:23:53.642384 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-qvvcw" Nov 28 11:23:53 crc kubenswrapper[4923]: I1128 11:23:53.659776 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/407a49296837c8c1ba2ba3d7e1a48e4734f20cac0b622a348cb10970b8zbzwl"] Nov 28 11:23:53 crc kubenswrapper[4923]: I1128 11:23:53.715463 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t9f8r\" (UniqueName: \"kubernetes.io/projected/f0dbc892-54ce-4fdc-8989-a68112853524-kube-api-access-t9f8r\") pod \"407a49296837c8c1ba2ba3d7e1a48e4734f20cac0b622a348cb10970b8zbzwl\" (UID: \"f0dbc892-54ce-4fdc-8989-a68112853524\") " pod="openstack-operators/407a49296837c8c1ba2ba3d7e1a48e4734f20cac0b622a348cb10970b8zbzwl" Nov 28 11:23:53 crc kubenswrapper[4923]: I1128 11:23:53.715660 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/f0dbc892-54ce-4fdc-8989-a68112853524-util\") pod \"407a49296837c8c1ba2ba3d7e1a48e4734f20cac0b622a348cb10970b8zbzwl\" (UID: \"f0dbc892-54ce-4fdc-8989-a68112853524\") " pod="openstack-operators/407a49296837c8c1ba2ba3d7e1a48e4734f20cac0b622a348cb10970b8zbzwl" Nov 28 11:23:53 crc kubenswrapper[4923]: I1128 11:23:53.715837 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/f0dbc892-54ce-4fdc-8989-a68112853524-bundle\") pod \"407a49296837c8c1ba2ba3d7e1a48e4734f20cac0b622a348cb10970b8zbzwl\" (UID: \"f0dbc892-54ce-4fdc-8989-a68112853524\") " pod="openstack-operators/407a49296837c8c1ba2ba3d7e1a48e4734f20cac0b622a348cb10970b8zbzwl" Nov 28 11:23:53 crc kubenswrapper[4923]: E1128 11:23:53.772760 4923 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: 
[\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2c55fa30_52ca_462f_871b_85851e250a99.slice/crio-848ba12c69a9eacbe11ea3490ae46184ddc68ab2e2cfb97289128fabc175b9ac\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2c55fa30_52ca_462f_871b_85851e250a99.slice\": RecentStats: unable to find data in memory cache]" Nov 28 11:23:53 crc kubenswrapper[4923]: I1128 11:23:53.816857 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/f0dbc892-54ce-4fdc-8989-a68112853524-bundle\") pod \"407a49296837c8c1ba2ba3d7e1a48e4734f20cac0b622a348cb10970b8zbzwl\" (UID: \"f0dbc892-54ce-4fdc-8989-a68112853524\") " pod="openstack-operators/407a49296837c8c1ba2ba3d7e1a48e4734f20cac0b622a348cb10970b8zbzwl" Nov 28 11:23:53 crc kubenswrapper[4923]: I1128 11:23:53.817157 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t9f8r\" (UniqueName: \"kubernetes.io/projected/f0dbc892-54ce-4fdc-8989-a68112853524-kube-api-access-t9f8r\") pod \"407a49296837c8c1ba2ba3d7e1a48e4734f20cac0b622a348cb10970b8zbzwl\" (UID: \"f0dbc892-54ce-4fdc-8989-a68112853524\") " pod="openstack-operators/407a49296837c8c1ba2ba3d7e1a48e4734f20cac0b622a348cb10970b8zbzwl" Nov 28 11:23:53 crc kubenswrapper[4923]: I1128 11:23:53.817269 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/f0dbc892-54ce-4fdc-8989-a68112853524-util\") pod \"407a49296837c8c1ba2ba3d7e1a48e4734f20cac0b622a348cb10970b8zbzwl\" (UID: \"f0dbc892-54ce-4fdc-8989-a68112853524\") " pod="openstack-operators/407a49296837c8c1ba2ba3d7e1a48e4734f20cac0b622a348cb10970b8zbzwl" Nov 28 11:23:53 crc kubenswrapper[4923]: I1128 11:23:53.817341 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/f0dbc892-54ce-4fdc-8989-a68112853524-bundle\") pod \"407a49296837c8c1ba2ba3d7e1a48e4734f20cac0b622a348cb10970b8zbzwl\" (UID: \"f0dbc892-54ce-4fdc-8989-a68112853524\") " pod="openstack-operators/407a49296837c8c1ba2ba3d7e1a48e4734f20cac0b622a348cb10970b8zbzwl" Nov 28 11:23:53 crc kubenswrapper[4923]: I1128 11:23:53.817872 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/f0dbc892-54ce-4fdc-8989-a68112853524-util\") pod \"407a49296837c8c1ba2ba3d7e1a48e4734f20cac0b622a348cb10970b8zbzwl\" (UID: \"f0dbc892-54ce-4fdc-8989-a68112853524\") " pod="openstack-operators/407a49296837c8c1ba2ba3d7e1a48e4734f20cac0b622a348cb10970b8zbzwl" Nov 28 11:23:53 crc kubenswrapper[4923]: I1128 11:23:53.839386 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t9f8r\" (UniqueName: \"kubernetes.io/projected/f0dbc892-54ce-4fdc-8989-a68112853524-kube-api-access-t9f8r\") pod \"407a49296837c8c1ba2ba3d7e1a48e4734f20cac0b622a348cb10970b8zbzwl\" (UID: \"f0dbc892-54ce-4fdc-8989-a68112853524\") " pod="openstack-operators/407a49296837c8c1ba2ba3d7e1a48e4734f20cac0b622a348cb10970b8zbzwl" Nov 28 11:23:53 crc kubenswrapper[4923]: I1128 11:23:53.964288 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/407a49296837c8c1ba2ba3d7e1a48e4734f20cac0b622a348cb10970b8zbzwl" Nov 28 11:23:54 crc kubenswrapper[4923]: I1128 11:23:54.208550 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/407a49296837c8c1ba2ba3d7e1a48e4734f20cac0b622a348cb10970b8zbzwl"] Nov 28 11:23:54 crc kubenswrapper[4923]: I1128 11:23:54.457452 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/407a49296837c8c1ba2ba3d7e1a48e4734f20cac0b622a348cb10970b8zbzwl" event={"ID":"f0dbc892-54ce-4fdc-8989-a68112853524","Type":"ContainerStarted","Data":"d4d2f5160b532da9d36cb9bd9e264c2f563b0c2c7a0c66b06fa2eca277d0cee5"} Nov 28 11:23:54 crc kubenswrapper[4923]: I1128 11:23:54.457747 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/407a49296837c8c1ba2ba3d7e1a48e4734f20cac0b622a348cb10970b8zbzwl" event={"ID":"f0dbc892-54ce-4fdc-8989-a68112853524","Type":"ContainerStarted","Data":"5210508dc4a8942f7f8a785821a936fc22240d5e84dc191fc371dae1023d5993"} Nov 28 11:23:55 crc kubenswrapper[4923]: I1128 11:23:55.468962 4923 generic.go:334] "Generic (PLEG): container finished" podID="f0dbc892-54ce-4fdc-8989-a68112853524" containerID="d4d2f5160b532da9d36cb9bd9e264c2f563b0c2c7a0c66b06fa2eca277d0cee5" exitCode=0 Nov 28 11:23:55 crc kubenswrapper[4923]: I1128 11:23:55.469010 4923 generic.go:334] "Generic (PLEG): container finished" podID="f0dbc892-54ce-4fdc-8989-a68112853524" containerID="6cbd8768dbd4a1fb8270ef9bfbc84eace0f443fa157517484b05bee2fcfa1646" exitCode=0 Nov 28 11:23:55 crc kubenswrapper[4923]: I1128 11:23:55.469042 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/407a49296837c8c1ba2ba3d7e1a48e4734f20cac0b622a348cb10970b8zbzwl" event={"ID":"f0dbc892-54ce-4fdc-8989-a68112853524","Type":"ContainerDied","Data":"d4d2f5160b532da9d36cb9bd9e264c2f563b0c2c7a0c66b06fa2eca277d0cee5"} Nov 28 11:23:55 crc kubenswrapper[4923]: I1128 11:23:55.469078 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/407a49296837c8c1ba2ba3d7e1a48e4734f20cac0b622a348cb10970b8zbzwl" event={"ID":"f0dbc892-54ce-4fdc-8989-a68112853524","Type":"ContainerDied","Data":"6cbd8768dbd4a1fb8270ef9bfbc84eace0f443fa157517484b05bee2fcfa1646"} Nov 28 11:23:56 crc kubenswrapper[4923]: I1128 11:23:56.478716 4923 generic.go:334] "Generic (PLEG): container finished" podID="f0dbc892-54ce-4fdc-8989-a68112853524" containerID="318a06a7c45216d0833146f80221d9366826518623b3914097b397c907f23f85" exitCode=0 Nov 28 11:23:56 crc kubenswrapper[4923]: I1128 11:23:56.478763 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/407a49296837c8c1ba2ba3d7e1a48e4734f20cac0b622a348cb10970b8zbzwl" event={"ID":"f0dbc892-54ce-4fdc-8989-a68112853524","Type":"ContainerDied","Data":"318a06a7c45216d0833146f80221d9366826518623b3914097b397c907f23f85"} Nov 28 11:23:56 crc kubenswrapper[4923]: I1128 11:23:56.517232 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-kxz7v" Nov 28 11:23:56 crc kubenswrapper[4923]: I1128 11:23:56.517904 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-kxz7v" Nov 28 11:23:56 crc kubenswrapper[4923]: I1128 11:23:56.562505 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-kxz7v" Nov 28 11:23:57 crc kubenswrapper[4923]: I1128 11:23:57.572967 4923 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-kxz7v" Nov 28 11:23:57 crc kubenswrapper[4923]: I1128 11:23:57.845738 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/407a49296837c8c1ba2ba3d7e1a48e4734f20cac0b622a348cb10970b8zbzwl" Nov 28 11:23:58 crc kubenswrapper[4923]: I1128 11:23:58.000155 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/f0dbc892-54ce-4fdc-8989-a68112853524-bundle\") pod \"f0dbc892-54ce-4fdc-8989-a68112853524\" (UID: \"f0dbc892-54ce-4fdc-8989-a68112853524\") " Nov 28 11:23:58 crc kubenswrapper[4923]: I1128 11:23:58.000261 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t9f8r\" (UniqueName: \"kubernetes.io/projected/f0dbc892-54ce-4fdc-8989-a68112853524-kube-api-access-t9f8r\") pod \"f0dbc892-54ce-4fdc-8989-a68112853524\" (UID: \"f0dbc892-54ce-4fdc-8989-a68112853524\") " Nov 28 11:23:58 crc kubenswrapper[4923]: I1128 11:23:58.000299 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/f0dbc892-54ce-4fdc-8989-a68112853524-util\") pod \"f0dbc892-54ce-4fdc-8989-a68112853524\" (UID: \"f0dbc892-54ce-4fdc-8989-a68112853524\") " Nov 28 11:23:58 crc kubenswrapper[4923]: I1128 11:23:58.001385 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f0dbc892-54ce-4fdc-8989-a68112853524-bundle" (OuterVolumeSpecName: "bundle") pod "f0dbc892-54ce-4fdc-8989-a68112853524" (UID: "f0dbc892-54ce-4fdc-8989-a68112853524"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:23:58 crc kubenswrapper[4923]: I1128 11:23:58.008812 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f0dbc892-54ce-4fdc-8989-a68112853524-kube-api-access-t9f8r" (OuterVolumeSpecName: "kube-api-access-t9f8r") pod "f0dbc892-54ce-4fdc-8989-a68112853524" (UID: "f0dbc892-54ce-4fdc-8989-a68112853524"). InnerVolumeSpecName "kube-api-access-t9f8r". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:23:58 crc kubenswrapper[4923]: I1128 11:23:58.032472 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f0dbc892-54ce-4fdc-8989-a68112853524-util" (OuterVolumeSpecName: "util") pod "f0dbc892-54ce-4fdc-8989-a68112853524" (UID: "f0dbc892-54ce-4fdc-8989-a68112853524"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:23:58 crc kubenswrapper[4923]: I1128 11:23:58.102359 4923 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/f0dbc892-54ce-4fdc-8989-a68112853524-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 11:23:58 crc kubenswrapper[4923]: I1128 11:23:58.102403 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t9f8r\" (UniqueName: \"kubernetes.io/projected/f0dbc892-54ce-4fdc-8989-a68112853524-kube-api-access-t9f8r\") on node \"crc\" DevicePath \"\"" Nov 28 11:23:58 crc kubenswrapper[4923]: I1128 11:23:58.102425 4923 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/f0dbc892-54ce-4fdc-8989-a68112853524-util\") on node \"crc\" DevicePath \"\"" Nov 28 11:23:58 crc kubenswrapper[4923]: I1128 11:23:58.501057 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/407a49296837c8c1ba2ba3d7e1a48e4734f20cac0b622a348cb10970b8zbzwl" event={"ID":"f0dbc892-54ce-4fdc-8989-a68112853524","Type":"ContainerDied","Data":"5210508dc4a8942f7f8a785821a936fc22240d5e84dc191fc371dae1023d5993"} Nov 28 11:23:58 crc kubenswrapper[4923]: I1128 11:23:58.501395 4923 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5210508dc4a8942f7f8a785821a936fc22240d5e84dc191fc371dae1023d5993" Nov 28 11:23:58 crc kubenswrapper[4923]: I1128 11:23:58.501122 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/407a49296837c8c1ba2ba3d7e1a48e4734f20cac0b622a348cb10970b8zbzwl" Nov 28 11:23:59 crc kubenswrapper[4923]: I1128 11:23:59.384879 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-kxz7v"] Nov 28 11:24:00 crc kubenswrapper[4923]: I1128 11:24:00.515528 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-kxz7v" podUID="6e7e489c-df5a-4aae-bf95-68ee1dc8a47d" containerName="registry-server" containerID="cri-o://d9a0f0550e949cdfb9f4bb79e2109cf2dfb1005a97c9dbadfb6bb23265a681e1" gracePeriod=2 Nov 28 11:24:00 crc kubenswrapper[4923]: I1128 11:24:00.935481 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-kxz7v" Nov 28 11:24:01 crc kubenswrapper[4923]: I1128 11:24:01.052564 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6e7e489c-df5a-4aae-bf95-68ee1dc8a47d-catalog-content\") pod \"6e7e489c-df5a-4aae-bf95-68ee1dc8a47d\" (UID: \"6e7e489c-df5a-4aae-bf95-68ee1dc8a47d\") " Nov 28 11:24:01 crc kubenswrapper[4923]: I1128 11:24:01.052606 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6e7e489c-df5a-4aae-bf95-68ee1dc8a47d-utilities\") pod \"6e7e489c-df5a-4aae-bf95-68ee1dc8a47d\" (UID: \"6e7e489c-df5a-4aae-bf95-68ee1dc8a47d\") " Nov 28 11:24:01 crc kubenswrapper[4923]: I1128 11:24:01.052668 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5n4xh\" (UniqueName: \"kubernetes.io/projected/6e7e489c-df5a-4aae-bf95-68ee1dc8a47d-kube-api-access-5n4xh\") pod \"6e7e489c-df5a-4aae-bf95-68ee1dc8a47d\" (UID: \"6e7e489c-df5a-4aae-bf95-68ee1dc8a47d\") " Nov 28 11:24:01 crc kubenswrapper[4923]: I1128 11:24:01.054223 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6e7e489c-df5a-4aae-bf95-68ee1dc8a47d-utilities" (OuterVolumeSpecName: "utilities") pod "6e7e489c-df5a-4aae-bf95-68ee1dc8a47d" (UID: "6e7e489c-df5a-4aae-bf95-68ee1dc8a47d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:24:01 crc kubenswrapper[4923]: I1128 11:24:01.059296 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6e7e489c-df5a-4aae-bf95-68ee1dc8a47d-kube-api-access-5n4xh" (OuterVolumeSpecName: "kube-api-access-5n4xh") pod "6e7e489c-df5a-4aae-bf95-68ee1dc8a47d" (UID: "6e7e489c-df5a-4aae-bf95-68ee1dc8a47d"). InnerVolumeSpecName "kube-api-access-5n4xh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:24:01 crc kubenswrapper[4923]: I1128 11:24:01.074698 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6e7e489c-df5a-4aae-bf95-68ee1dc8a47d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6e7e489c-df5a-4aae-bf95-68ee1dc8a47d" (UID: "6e7e489c-df5a-4aae-bf95-68ee1dc8a47d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:24:01 crc kubenswrapper[4923]: I1128 11:24:01.154651 4923 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6e7e489c-df5a-4aae-bf95-68ee1dc8a47d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 11:24:01 crc kubenswrapper[4923]: I1128 11:24:01.154902 4923 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6e7e489c-df5a-4aae-bf95-68ee1dc8a47d-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 11:24:01 crc kubenswrapper[4923]: I1128 11:24:01.154981 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5n4xh\" (UniqueName: \"kubernetes.io/projected/6e7e489c-df5a-4aae-bf95-68ee1dc8a47d-kube-api-access-5n4xh\") on node \"crc\" DevicePath \"\"" Nov 28 11:24:01 crc kubenswrapper[4923]: I1128 11:24:01.521618 4923 generic.go:334] "Generic (PLEG): container finished" podID="6e7e489c-df5a-4aae-bf95-68ee1dc8a47d" containerID="d9a0f0550e949cdfb9f4bb79e2109cf2dfb1005a97c9dbadfb6bb23265a681e1" exitCode=0 Nov 28 11:24:01 crc kubenswrapper[4923]: I1128 11:24:01.521664 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kxz7v" event={"ID":"6e7e489c-df5a-4aae-bf95-68ee1dc8a47d","Type":"ContainerDied","Data":"d9a0f0550e949cdfb9f4bb79e2109cf2dfb1005a97c9dbadfb6bb23265a681e1"} Nov 28 11:24:01 crc kubenswrapper[4923]: I1128 11:24:01.521692 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kxz7v" event={"ID":"6e7e489c-df5a-4aae-bf95-68ee1dc8a47d","Type":"ContainerDied","Data":"f11a46e34c0180c31b973bdb46b2e4e36b0111755f786f21b6acd9be25dd21f0"} Nov 28 11:24:01 crc kubenswrapper[4923]: I1128 11:24:01.521689 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-kxz7v" Nov 28 11:24:01 crc kubenswrapper[4923]: I1128 11:24:01.521712 4923 scope.go:117] "RemoveContainer" containerID="d9a0f0550e949cdfb9f4bb79e2109cf2dfb1005a97c9dbadfb6bb23265a681e1" Nov 28 11:24:01 crc kubenswrapper[4923]: I1128 11:24:01.539166 4923 scope.go:117] "RemoveContainer" containerID="c31a6c1009dcb9165222e0c93f5a6018933cfbc13404281fa57c0cb1c56bd4b5" Nov 28 11:24:01 crc kubenswrapper[4923]: I1128 11:24:01.540795 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-kxz7v"] Nov 28 11:24:01 crc kubenswrapper[4923]: I1128 11:24:01.550621 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-kxz7v"] Nov 28 11:24:01 crc kubenswrapper[4923]: I1128 11:24:01.553896 4923 scope.go:117] "RemoveContainer" containerID="ffc014bfc62d7a3aff3f4d3b3468d640bcf335748ca589271f726ee9406ac3f6" Nov 28 11:24:01 crc kubenswrapper[4923]: I1128 11:24:01.571455 4923 scope.go:117] "RemoveContainer" containerID="d9a0f0550e949cdfb9f4bb79e2109cf2dfb1005a97c9dbadfb6bb23265a681e1" Nov 28 11:24:01 crc kubenswrapper[4923]: E1128 11:24:01.571952 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d9a0f0550e949cdfb9f4bb79e2109cf2dfb1005a97c9dbadfb6bb23265a681e1\": container with ID starting with d9a0f0550e949cdfb9f4bb79e2109cf2dfb1005a97c9dbadfb6bb23265a681e1 not found: ID does not exist" containerID="d9a0f0550e949cdfb9f4bb79e2109cf2dfb1005a97c9dbadfb6bb23265a681e1" Nov 28 11:24:01 crc kubenswrapper[4923]: I1128 11:24:01.571995 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d9a0f0550e949cdfb9f4bb79e2109cf2dfb1005a97c9dbadfb6bb23265a681e1"} err="failed to get container status \"d9a0f0550e949cdfb9f4bb79e2109cf2dfb1005a97c9dbadfb6bb23265a681e1\": rpc error: code = NotFound desc = could not find container \"d9a0f0550e949cdfb9f4bb79e2109cf2dfb1005a97c9dbadfb6bb23265a681e1\": container with ID starting with d9a0f0550e949cdfb9f4bb79e2109cf2dfb1005a97c9dbadfb6bb23265a681e1 not found: ID does not exist" Nov 28 11:24:01 crc kubenswrapper[4923]: I1128 11:24:01.572026 4923 scope.go:117] "RemoveContainer" containerID="c31a6c1009dcb9165222e0c93f5a6018933cfbc13404281fa57c0cb1c56bd4b5" Nov 28 11:24:01 crc kubenswrapper[4923]: E1128 11:24:01.572428 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c31a6c1009dcb9165222e0c93f5a6018933cfbc13404281fa57c0cb1c56bd4b5\": container with ID starting with c31a6c1009dcb9165222e0c93f5a6018933cfbc13404281fa57c0cb1c56bd4b5 not found: ID does not exist" containerID="c31a6c1009dcb9165222e0c93f5a6018933cfbc13404281fa57c0cb1c56bd4b5" Nov 28 11:24:01 crc kubenswrapper[4923]: I1128 11:24:01.572448 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c31a6c1009dcb9165222e0c93f5a6018933cfbc13404281fa57c0cb1c56bd4b5"} err="failed to get container status \"c31a6c1009dcb9165222e0c93f5a6018933cfbc13404281fa57c0cb1c56bd4b5\": rpc error: code = NotFound desc = could not find container \"c31a6c1009dcb9165222e0c93f5a6018933cfbc13404281fa57c0cb1c56bd4b5\": container with ID starting with c31a6c1009dcb9165222e0c93f5a6018933cfbc13404281fa57c0cb1c56bd4b5 not found: ID does not exist" Nov 28 11:24:01 crc kubenswrapper[4923]: I1128 11:24:01.572461 4923 scope.go:117] "RemoveContainer" 
containerID="ffc014bfc62d7a3aff3f4d3b3468d640bcf335748ca589271f726ee9406ac3f6" Nov 28 11:24:01 crc kubenswrapper[4923]: E1128 11:24:01.572709 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ffc014bfc62d7a3aff3f4d3b3468d640bcf335748ca589271f726ee9406ac3f6\": container with ID starting with ffc014bfc62d7a3aff3f4d3b3468d640bcf335748ca589271f726ee9406ac3f6 not found: ID does not exist" containerID="ffc014bfc62d7a3aff3f4d3b3468d640bcf335748ca589271f726ee9406ac3f6" Nov 28 11:24:01 crc kubenswrapper[4923]: I1128 11:24:01.572731 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ffc014bfc62d7a3aff3f4d3b3468d640bcf335748ca589271f726ee9406ac3f6"} err="failed to get container status \"ffc014bfc62d7a3aff3f4d3b3468d640bcf335748ca589271f726ee9406ac3f6\": rpc error: code = NotFound desc = could not find container \"ffc014bfc62d7a3aff3f4d3b3468d640bcf335748ca589271f726ee9406ac3f6\": container with ID starting with ffc014bfc62d7a3aff3f4d3b3468d640bcf335748ca589271f726ee9406ac3f6 not found: ID does not exist" Nov 28 11:24:03 crc kubenswrapper[4923]: I1128 11:24:03.175616 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6e7e489c-df5a-4aae-bf95-68ee1dc8a47d" path="/var/lib/kubelet/pods/6e7e489c-df5a-4aae-bf95-68ee1dc8a47d/volumes" Nov 28 11:24:03 crc kubenswrapper[4923]: E1128 11:24:03.904444 4923 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2c55fa30_52ca_462f_871b_85851e250a99.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2c55fa30_52ca_462f_871b_85851e250a99.slice/crio-848ba12c69a9eacbe11ea3490ae46184ddc68ab2e2cfb97289128fabc175b9ac\": RecentStats: unable to find data in memory cache]" Nov 28 11:24:03 crc kubenswrapper[4923]: I1128 11:24:03.951879 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-operator-7f586794b9-kkz9j"] Nov 28 11:24:03 crc kubenswrapper[4923]: E1128 11:24:03.952403 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e7e489c-df5a-4aae-bf95-68ee1dc8a47d" containerName="extract-utilities" Nov 28 11:24:03 crc kubenswrapper[4923]: I1128 11:24:03.952414 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e7e489c-df5a-4aae-bf95-68ee1dc8a47d" containerName="extract-utilities" Nov 28 11:24:03 crc kubenswrapper[4923]: E1128 11:24:03.952424 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f0dbc892-54ce-4fdc-8989-a68112853524" containerName="extract" Nov 28 11:24:03 crc kubenswrapper[4923]: I1128 11:24:03.952429 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="f0dbc892-54ce-4fdc-8989-a68112853524" containerName="extract" Nov 28 11:24:03 crc kubenswrapper[4923]: E1128 11:24:03.952440 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e7e489c-df5a-4aae-bf95-68ee1dc8a47d" containerName="registry-server" Nov 28 11:24:03 crc kubenswrapper[4923]: I1128 11:24:03.952446 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e7e489c-df5a-4aae-bf95-68ee1dc8a47d" containerName="registry-server" Nov 28 11:24:03 crc kubenswrapper[4923]: E1128 11:24:03.952454 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e7e489c-df5a-4aae-bf95-68ee1dc8a47d" containerName="extract-content" Nov 28 11:24:03 
crc kubenswrapper[4923]: I1128 11:24:03.952460 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e7e489c-df5a-4aae-bf95-68ee1dc8a47d" containerName="extract-content" Nov 28 11:24:03 crc kubenswrapper[4923]: E1128 11:24:03.952474 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f0dbc892-54ce-4fdc-8989-a68112853524" containerName="util" Nov 28 11:24:03 crc kubenswrapper[4923]: I1128 11:24:03.952480 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="f0dbc892-54ce-4fdc-8989-a68112853524" containerName="util" Nov 28 11:24:03 crc kubenswrapper[4923]: E1128 11:24:03.952489 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f0dbc892-54ce-4fdc-8989-a68112853524" containerName="pull" Nov 28 11:24:03 crc kubenswrapper[4923]: I1128 11:24:03.952494 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="f0dbc892-54ce-4fdc-8989-a68112853524" containerName="pull" Nov 28 11:24:03 crc kubenswrapper[4923]: I1128 11:24:03.952602 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="6e7e489c-df5a-4aae-bf95-68ee1dc8a47d" containerName="registry-server" Nov 28 11:24:03 crc kubenswrapper[4923]: I1128 11:24:03.952612 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="f0dbc892-54ce-4fdc-8989-a68112853524" containerName="extract" Nov 28 11:24:03 crc kubenswrapper[4923]: I1128 11:24:03.952993 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-7f586794b9-kkz9j" Nov 28 11:24:03 crc kubenswrapper[4923]: I1128 11:24:03.954790 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-operator-dockercfg-b89lc" Nov 28 11:24:03 crc kubenswrapper[4923]: I1128 11:24:03.984062 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-7f586794b9-kkz9j"] Nov 28 11:24:04 crc kubenswrapper[4923]: I1128 11:24:03.997991 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zpnfq\" (UniqueName: \"kubernetes.io/projected/6a8160fc-9979-4e9e-a289-2c4e0a728f1c-kube-api-access-zpnfq\") pod \"openstack-operator-controller-operator-7f586794b9-kkz9j\" (UID: \"6a8160fc-9979-4e9e-a289-2c4e0a728f1c\") " pod="openstack-operators/openstack-operator-controller-operator-7f586794b9-kkz9j" Nov 28 11:24:04 crc kubenswrapper[4923]: I1128 11:24:04.099134 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zpnfq\" (UniqueName: \"kubernetes.io/projected/6a8160fc-9979-4e9e-a289-2c4e0a728f1c-kube-api-access-zpnfq\") pod \"openstack-operator-controller-operator-7f586794b9-kkz9j\" (UID: \"6a8160fc-9979-4e9e-a289-2c4e0a728f1c\") " pod="openstack-operators/openstack-operator-controller-operator-7f586794b9-kkz9j" Nov 28 11:24:04 crc kubenswrapper[4923]: I1128 11:24:04.122680 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zpnfq\" (UniqueName: \"kubernetes.io/projected/6a8160fc-9979-4e9e-a289-2c4e0a728f1c-kube-api-access-zpnfq\") pod \"openstack-operator-controller-operator-7f586794b9-kkz9j\" (UID: \"6a8160fc-9979-4e9e-a289-2c4e0a728f1c\") " pod="openstack-operators/openstack-operator-controller-operator-7f586794b9-kkz9j" Nov 28 11:24:04 crc kubenswrapper[4923]: I1128 11:24:04.268664 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-7f586794b9-kkz9j" Nov 28 11:24:04 crc kubenswrapper[4923]: I1128 11:24:04.486012 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-7f586794b9-kkz9j"] Nov 28 11:24:04 crc kubenswrapper[4923]: W1128 11:24:04.496224 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6a8160fc_9979_4e9e_a289_2c4e0a728f1c.slice/crio-4c20347551e5184d1659853dad6043973360c77b320126f30c281d87f33d3239 WatchSource:0}: Error finding container 4c20347551e5184d1659853dad6043973360c77b320126f30c281d87f33d3239: Status 404 returned error can't find the container with id 4c20347551e5184d1659853dad6043973360c77b320126f30c281d87f33d3239 Nov 28 11:24:04 crc kubenswrapper[4923]: I1128 11:24:04.554795 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-7f586794b9-kkz9j" event={"ID":"6a8160fc-9979-4e9e-a289-2c4e0a728f1c","Type":"ContainerStarted","Data":"4c20347551e5184d1659853dad6043973360c77b320126f30c281d87f33d3239"} Nov 28 11:24:09 crc kubenswrapper[4923]: I1128 11:24:09.599480 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-7f586794b9-kkz9j" event={"ID":"6a8160fc-9979-4e9e-a289-2c4e0a728f1c","Type":"ContainerStarted","Data":"79e86962fb01bf1678acd37c159f57101bb19fd6844a02fff2c7b3591d42c5c7"} Nov 28 11:24:09 crc kubenswrapper[4923]: I1128 11:24:09.600274 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-operator-7f586794b9-kkz9j" Nov 28 11:24:09 crc kubenswrapper[4923]: I1128 11:24:09.643895 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-operator-7f586794b9-kkz9j" podStartSLOduration=2.402375737 podStartE2EDuration="6.643880541s" podCreationTimestamp="2025-11-28 11:24:03 +0000 UTC" firstStartedPulling="2025-11-28 11:24:04.499145188 +0000 UTC m=+923.627829398" lastFinishedPulling="2025-11-28 11:24:08.740650002 +0000 UTC m=+927.869334202" observedRunningTime="2025-11-28 11:24:09.635510855 +0000 UTC m=+928.764195065" watchObservedRunningTime="2025-11-28 11:24:09.643880541 +0000 UTC m=+928.772564751" Nov 28 11:24:14 crc kubenswrapper[4923]: I1128 11:24:14.026741 4923 patch_prober.go:28] interesting pod/machine-config-daemon-bwdth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 11:24:14 crc kubenswrapper[4923]: I1128 11:24:14.027173 4923 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 11:24:14 crc kubenswrapper[4923]: E1128 11:24:14.063240 4923 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2c55fa30_52ca_462f_871b_85851e250a99.slice/crio-848ba12c69a9eacbe11ea3490ae46184ddc68ab2e2cfb97289128fabc175b9ac\": RecentStats: unable to find 
data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2c55fa30_52ca_462f_871b_85851e250a99.slice\": RecentStats: unable to find data in memory cache]" Nov 28 11:24:14 crc kubenswrapper[4923]: I1128 11:24:14.276816 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-operator-7f586794b9-kkz9j" Nov 28 11:24:24 crc kubenswrapper[4923]: E1128 11:24:24.252031 4923 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2c55fa30_52ca_462f_871b_85851e250a99.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2c55fa30_52ca_462f_871b_85851e250a99.slice/crio-848ba12c69a9eacbe11ea3490ae46184ddc68ab2e2cfb97289128fabc175b9ac\": RecentStats: unable to find data in memory cache]" Nov 28 11:24:32 crc kubenswrapper[4923]: I1128 11:24:32.932523 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-n887x"] Nov 28 11:24:32 crc kubenswrapper[4923]: I1128 11:24:32.935194 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-n887x" Nov 28 11:24:32 crc kubenswrapper[4923]: I1128 11:24:32.942385 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-n887x"] Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.011210 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rzt92\" (UniqueName: \"kubernetes.io/projected/6cf5f23b-f4d3-4c24-ac00-26588d4bb3ab-kube-api-access-rzt92\") pod \"certified-operators-n887x\" (UID: \"6cf5f23b-f4d3-4c24-ac00-26588d4bb3ab\") " pod="openshift-marketplace/certified-operators-n887x" Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.011314 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6cf5f23b-f4d3-4c24-ac00-26588d4bb3ab-catalog-content\") pod \"certified-operators-n887x\" (UID: \"6cf5f23b-f4d3-4c24-ac00-26588d4bb3ab\") " pod="openshift-marketplace/certified-operators-n887x" Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.011365 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6cf5f23b-f4d3-4c24-ac00-26588d4bb3ab-utilities\") pod \"certified-operators-n887x\" (UID: \"6cf5f23b-f4d3-4c24-ac00-26588d4bb3ab\") " pod="openshift-marketplace/certified-operators-n887x" Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.112518 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6cf5f23b-f4d3-4c24-ac00-26588d4bb3ab-catalog-content\") pod \"certified-operators-n887x\" (UID: \"6cf5f23b-f4d3-4c24-ac00-26588d4bb3ab\") " pod="openshift-marketplace/certified-operators-n887x" Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.112609 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6cf5f23b-f4d3-4c24-ac00-26588d4bb3ab-utilities\") pod \"certified-operators-n887x\" (UID: \"6cf5f23b-f4d3-4c24-ac00-26588d4bb3ab\") " pod="openshift-marketplace/certified-operators-n887x" Nov 28 
11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.112656 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rzt92\" (UniqueName: \"kubernetes.io/projected/6cf5f23b-f4d3-4c24-ac00-26588d4bb3ab-kube-api-access-rzt92\") pod \"certified-operators-n887x\" (UID: \"6cf5f23b-f4d3-4c24-ac00-26588d4bb3ab\") " pod="openshift-marketplace/certified-operators-n887x" Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.113055 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6cf5f23b-f4d3-4c24-ac00-26588d4bb3ab-catalog-content\") pod \"certified-operators-n887x\" (UID: \"6cf5f23b-f4d3-4c24-ac00-26588d4bb3ab\") " pod="openshift-marketplace/certified-operators-n887x" Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.113294 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6cf5f23b-f4d3-4c24-ac00-26588d4bb3ab-utilities\") pod \"certified-operators-n887x\" (UID: \"6cf5f23b-f4d3-4c24-ac00-26588d4bb3ab\") " pod="openshift-marketplace/certified-operators-n887x" Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.136717 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rzt92\" (UniqueName: \"kubernetes.io/projected/6cf5f23b-f4d3-4c24-ac00-26588d4bb3ab-kube-api-access-rzt92\") pod \"certified-operators-n887x\" (UID: \"6cf5f23b-f4d3-4c24-ac00-26588d4bb3ab\") " pod="openshift-marketplace/certified-operators-n887x" Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.241335 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7b64f4fb85-kbm2r"] Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.242159 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-kbm2r" Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.244918 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-rrrpc" Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.250943 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/cinder-operator-controller-manager-6b7f75547b-p9k2s"] Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.251821 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-p9k2s" Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.257676 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-8gpk7" Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.264489 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7b64f4fb85-kbm2r"] Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.265140 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-n887x" Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.268077 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-6b7f75547b-p9k2s"] Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.299853 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/designate-operator-controller-manager-955677c94-8ftq2"] Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.300681 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-955677c94-8ftq2" Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.303228 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-8m4vf" Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.329771 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-955677c94-8ftq2"] Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.349326 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/glance-operator-controller-manager-589cbd6b5b-kmtzj"] Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.350351 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-kmtzj" Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.352996 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-5stsn" Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.387032 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/heat-operator-controller-manager-5b77f656f-jtpmz"] Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.417425 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-jtpmz" Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.424344 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zq65v\" (UniqueName: \"kubernetes.io/projected/09a1376c-00d7-4540-a905-078c297241cb-kube-api-access-zq65v\") pod \"cinder-operator-controller-manager-6b7f75547b-p9k2s\" (UID: \"09a1376c-00d7-4540-a905-078c297241cb\") " pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-p9k2s" Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.424550 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zjz7x\" (UniqueName: \"kubernetes.io/projected/e469dd36-fba4-4342-8fd6-ef847f821393-kube-api-access-zjz7x\") pod \"designate-operator-controller-manager-955677c94-8ftq2\" (UID: \"e469dd36-fba4-4342-8fd6-ef847f821393\") " pod="openstack-operators/designate-operator-controller-manager-955677c94-8ftq2" Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.424634 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xn2pm\" (UniqueName: \"kubernetes.io/projected/eb007735-97dd-4d13-9b3d-28adefb557e1-kube-api-access-xn2pm\") pod \"barbican-operator-controller-manager-7b64f4fb85-kbm2r\" (UID: \"eb007735-97dd-4d13-9b3d-28adefb557e1\") " pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-kbm2r" Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.426026 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-h5xxb" Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.465551 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-589cbd6b5b-kmtzj"] Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.476402 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-5b77f656f-jtpmz"] Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.511441 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/horizon-operator-controller-manager-5d494799bf-98w4g"] Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.512872 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-98w4g" Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.520071 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-j7dbc" Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.528501 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zq65v\" (UniqueName: \"kubernetes.io/projected/09a1376c-00d7-4540-a905-078c297241cb-kube-api-access-zq65v\") pod \"cinder-operator-controller-manager-6b7f75547b-p9k2s\" (UID: \"09a1376c-00d7-4540-a905-078c297241cb\") " pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-p9k2s" Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.528551 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zjz7x\" (UniqueName: \"kubernetes.io/projected/e469dd36-fba4-4342-8fd6-ef847f821393-kube-api-access-zjz7x\") pod \"designate-operator-controller-manager-955677c94-8ftq2\" (UID: \"e469dd36-fba4-4342-8fd6-ef847f821393\") " pod="openstack-operators/designate-operator-controller-manager-955677c94-8ftq2" Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.528571 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xn2pm\" (UniqueName: \"kubernetes.io/projected/eb007735-97dd-4d13-9b3d-28adefb557e1-kube-api-access-xn2pm\") pod \"barbican-operator-controller-manager-7b64f4fb85-kbm2r\" (UID: \"eb007735-97dd-4d13-9b3d-28adefb557e1\") " pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-kbm2r" Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.528619 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wgzjk\" (UniqueName: \"kubernetes.io/projected/2ef6846b-733b-4c63-8add-5c3251658a7e-kube-api-access-wgzjk\") pod \"glance-operator-controller-manager-589cbd6b5b-kmtzj\" (UID: \"2ef6846b-733b-4c63-8add-5c3251658a7e\") " pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-kmtzj" Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.528655 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nsl4q\" (UniqueName: \"kubernetes.io/projected/2ea0b3c3-a5d4-4b2f-81ef-a52573d37e06-kube-api-access-nsl4q\") pod \"heat-operator-controller-manager-5b77f656f-jtpmz\" (UID: \"2ea0b3c3-a5d4-4b2f-81ef-a52573d37e06\") " pod="openstack-operators/heat-operator-controller-manager-5b77f656f-jtpmz" Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.529642 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-5d494799bf-98w4g"] Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.574919 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-57548d458d-2snwf"] Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.575967 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-57548d458d-2snwf" Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.589265 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-s4jvb" Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.589661 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert" Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.590611 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xn2pm\" (UniqueName: \"kubernetes.io/projected/eb007735-97dd-4d13-9b3d-28adefb557e1-kube-api-access-xn2pm\") pod \"barbican-operator-controller-manager-7b64f4fb85-kbm2r\" (UID: \"eb007735-97dd-4d13-9b3d-28adefb557e1\") " pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-kbm2r" Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.611508 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-j72jx"] Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.612547 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-j72jx" Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.626684 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-7rsmz" Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.630305 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cb666\" (UniqueName: \"kubernetes.io/projected/ec4c9bb0-95fa-4840-8b48-de2b822bb788-kube-api-access-cb666\") pod \"infra-operator-controller-manager-57548d458d-2snwf\" (UID: \"ec4c9bb0-95fa-4840-8b48-de2b822bb788\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-2snwf" Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.630345 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wgzjk\" (UniqueName: \"kubernetes.io/projected/2ef6846b-733b-4c63-8add-5c3251658a7e-kube-api-access-wgzjk\") pod \"glance-operator-controller-manager-589cbd6b5b-kmtzj\" (UID: \"2ef6846b-733b-4c63-8add-5c3251658a7e\") " pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-kmtzj" Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.630378 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/ec4c9bb0-95fa-4840-8b48-de2b822bb788-cert\") pod \"infra-operator-controller-manager-57548d458d-2snwf\" (UID: \"ec4c9bb0-95fa-4840-8b48-de2b822bb788\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-2snwf" Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.630398 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5m446\" (UniqueName: \"kubernetes.io/projected/a813ab03-9734-4a76-aef0-62c7606c85d5-kube-api-access-5m446\") pod \"horizon-operator-controller-manager-5d494799bf-98w4g\" (UID: \"a813ab03-9734-4a76-aef0-62c7606c85d5\") " pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-98w4g" Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.630420 4923 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"kube-api-access-nsl4q\" (UniqueName: \"kubernetes.io/projected/2ea0b3c3-a5d4-4b2f-81ef-a52573d37e06-kube-api-access-nsl4q\") pod \"heat-operator-controller-manager-5b77f656f-jtpmz\" (UID: \"2ea0b3c3-a5d4-4b2f-81ef-a52573d37e06\") " pod="openstack-operators/heat-operator-controller-manager-5b77f656f-jtpmz" Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.631731 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zjz7x\" (UniqueName: \"kubernetes.io/projected/e469dd36-fba4-4342-8fd6-ef847f821393-kube-api-access-zjz7x\") pod \"designate-operator-controller-manager-955677c94-8ftq2\" (UID: \"e469dd36-fba4-4342-8fd6-ef847f821393\") " pod="openstack-operators/designate-operator-controller-manager-955677c94-8ftq2" Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.638374 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zq65v\" (UniqueName: \"kubernetes.io/projected/09a1376c-00d7-4540-a905-078c297241cb-kube-api-access-zq65v\") pod \"cinder-operator-controller-manager-6b7f75547b-p9k2s\" (UID: \"09a1376c-00d7-4540-a905-078c297241cb\") " pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-p9k2s" Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.638431 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-57548d458d-2snwf"] Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.639687 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-955677c94-8ftq2" Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.645629 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7b4567c7cf-sk9fr"] Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.646668 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-sk9fr" Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.649075 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-j72jx"] Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.661885 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-7gp2t" Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.665891 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wgzjk\" (UniqueName: \"kubernetes.io/projected/2ef6846b-733b-4c63-8add-5c3251658a7e-kube-api-access-wgzjk\") pod \"glance-operator-controller-manager-589cbd6b5b-kmtzj\" (UID: \"2ef6846b-733b-4c63-8add-5c3251658a7e\") " pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-kmtzj" Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.677128 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nsl4q\" (UniqueName: \"kubernetes.io/projected/2ea0b3c3-a5d4-4b2f-81ef-a52573d37e06-kube-api-access-nsl4q\") pod \"heat-operator-controller-manager-5b77f656f-jtpmz\" (UID: \"2ea0b3c3-a5d4-4b2f-81ef-a52573d37e06\") " pod="openstack-operators/heat-operator-controller-manager-5b77f656f-jtpmz" Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.679137 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/manila-operator-controller-manager-5d499bf58b-v8vn8"] Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.681409 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-v8vn8" Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.685418 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-zdvr5" Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.731403 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-kmtzj" Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.732286 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dhxn8\" (UniqueName: \"kubernetes.io/projected/ed2b1137-a903-4224-b706-304a2f416007-kube-api-access-dhxn8\") pod \"manila-operator-controller-manager-5d499bf58b-v8vn8\" (UID: \"ed2b1137-a903-4224-b706-304a2f416007\") " pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-v8vn8" Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.732316 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cb666\" (UniqueName: \"kubernetes.io/projected/ec4c9bb0-95fa-4840-8b48-de2b822bb788-kube-api-access-cb666\") pod \"infra-operator-controller-manager-57548d458d-2snwf\" (UID: \"ec4c9bb0-95fa-4840-8b48-de2b822bb788\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-2snwf" Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.732336 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ns6mc\" (UniqueName: \"kubernetes.io/projected/c434efb7-70cf-4c94-be0d-9635325d758c-kube-api-access-ns6mc\") pod \"keystone-operator-controller-manager-7b4567c7cf-sk9fr\" (UID: \"c434efb7-70cf-4c94-be0d-9635325d758c\") " pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-sk9fr" Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.732358 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hnghs\" (UniqueName: \"kubernetes.io/projected/52b8b64e-5401-41ef-8d65-cc275cdaf832-kube-api-access-hnghs\") pod \"ironic-operator-controller-manager-67cb4dc6d4-j72jx\" (UID: \"52b8b64e-5401-41ef-8d65-cc275cdaf832\") " pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-j72jx" Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.732390 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/ec4c9bb0-95fa-4840-8b48-de2b822bb788-cert\") pod \"infra-operator-controller-manager-57548d458d-2snwf\" (UID: \"ec4c9bb0-95fa-4840-8b48-de2b822bb788\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-2snwf" Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.732407 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5m446\" (UniqueName: \"kubernetes.io/projected/a813ab03-9734-4a76-aef0-62c7606c85d5-kube-api-access-5m446\") pod \"horizon-operator-controller-manager-5d494799bf-98w4g\" (UID: \"a813ab03-9734-4a76-aef0-62c7606c85d5\") " pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-98w4g" Nov 28 11:24:33 crc kubenswrapper[4923]: E1128 11:24:33.732830 4923 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 28 11:24:33 crc kubenswrapper[4923]: E1128 11:24:33.732864 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ec4c9bb0-95fa-4840-8b48-de2b822bb788-cert podName:ec4c9bb0-95fa-4840-8b48-de2b822bb788 nodeName:}" failed. No retries permitted until 2025-11-28 11:24:34.232849837 +0000 UTC m=+953.361534047 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/ec4c9bb0-95fa-4840-8b48-de2b822bb788-cert") pod "infra-operator-controller-manager-57548d458d-2snwf" (UID: "ec4c9bb0-95fa-4840-8b48-de2b822bb788") : secret "infra-operator-webhook-server-cert" not found Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.748728 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7b4567c7cf-sk9fr"] Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.752119 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-qc4bb"] Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.753052 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-qc4bb" Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.763518 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-69mpv" Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.764223 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-jtpmz" Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.766570 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5m446\" (UniqueName: \"kubernetes.io/projected/a813ab03-9734-4a76-aef0-62c7606c85d5-kube-api-access-5m446\") pod \"horizon-operator-controller-manager-5d494799bf-98w4g\" (UID: \"a813ab03-9734-4a76-aef0-62c7606c85d5\") " pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-98w4g" Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.782163 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cb666\" (UniqueName: \"kubernetes.io/projected/ec4c9bb0-95fa-4840-8b48-de2b822bb788-kube-api-access-cb666\") pod \"infra-operator-controller-manager-57548d458d-2snwf\" (UID: \"ec4c9bb0-95fa-4840-8b48-de2b822bb788\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-2snwf" Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.785045 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-5d499bf58b-v8vn8"] Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.812492 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-qc4bb"] Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.829321 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-98w4g" Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.834578 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dhxn8\" (UniqueName: \"kubernetes.io/projected/ed2b1137-a903-4224-b706-304a2f416007-kube-api-access-dhxn8\") pod \"manila-operator-controller-manager-5d499bf58b-v8vn8\" (UID: \"ed2b1137-a903-4224-b706-304a2f416007\") " pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-v8vn8" Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.834636 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ns6mc\" (UniqueName: \"kubernetes.io/projected/c434efb7-70cf-4c94-be0d-9635325d758c-kube-api-access-ns6mc\") pod \"keystone-operator-controller-manager-7b4567c7cf-sk9fr\" (UID: \"c434efb7-70cf-4c94-be0d-9635325d758c\") " pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-sk9fr" Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.834663 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hnghs\" (UniqueName: \"kubernetes.io/projected/52b8b64e-5401-41ef-8d65-cc275cdaf832-kube-api-access-hnghs\") pod \"ironic-operator-controller-manager-67cb4dc6d4-j72jx\" (UID: \"52b8b64e-5401-41ef-8d65-cc275cdaf832\") " pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-j72jx" Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.834692 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qvbvn\" (UniqueName: \"kubernetes.io/projected/bd6c3e5b-2eb9-4f4b-8893-07aab7091fab-kube-api-access-qvbvn\") pod \"mariadb-operator-controller-manager-66f4dd4bc7-qc4bb\" (UID: \"bd6c3e5b-2eb9-4f4b-8893-07aab7091fab\") " pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-qc4bb" Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.847185 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/neutron-operator-controller-manager-6fdcddb789-9z8n7"] Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.848083 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-9z8n7" Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.857597 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-n6qxn" Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.869126 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-p9k2s" Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.870320 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-kbm2r" Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.871909 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ns6mc\" (UniqueName: \"kubernetes.io/projected/c434efb7-70cf-4c94-be0d-9635325d758c-kube-api-access-ns6mc\") pod \"keystone-operator-controller-manager-7b4567c7cf-sk9fr\" (UID: \"c434efb7-70cf-4c94-be0d-9635325d758c\") " pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-sk9fr" Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.883587 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hnghs\" (UniqueName: \"kubernetes.io/projected/52b8b64e-5401-41ef-8d65-cc275cdaf832-kube-api-access-hnghs\") pod \"ironic-operator-controller-manager-67cb4dc6d4-j72jx\" (UID: \"52b8b64e-5401-41ef-8d65-cc275cdaf832\") " pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-j72jx" Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.944532 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qvbvn\" (UniqueName: \"kubernetes.io/projected/bd6c3e5b-2eb9-4f4b-8893-07aab7091fab-kube-api-access-qvbvn\") pod \"mariadb-operator-controller-manager-66f4dd4bc7-qc4bb\" (UID: \"bd6c3e5b-2eb9-4f4b-8893-07aab7091fab\") " pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-qc4bb" Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.945504 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z96zk\" (UniqueName: \"kubernetes.io/projected/5d041894-6ce7-401f-9b0b-5d5a9e31a68d-kube-api-access-z96zk\") pod \"neutron-operator-controller-manager-6fdcddb789-9z8n7\" (UID: \"5d041894-6ce7-401f-9b0b-5d5a9e31a68d\") " pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-9z8n7" Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.945976 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/nova-operator-controller-manager-79556f57fc-vvgng"] Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.955576 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dhxn8\" (UniqueName: \"kubernetes.io/projected/ed2b1137-a903-4224-b706-304a2f416007-kube-api-access-dhxn8\") pod \"manila-operator-controller-manager-5d499bf58b-v8vn8\" (UID: \"ed2b1137-a903-4224-b706-304a2f416007\") " pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-v8vn8" Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.960134 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-vvgng" Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.968993 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/octavia-operator-controller-manager-64cdc6ff96-ktqqj"] Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.977867 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-ktqqj" Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.992625 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-6x5c2" Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.992849 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-87xgl" Nov 28 11:24:33 crc kubenswrapper[4923]: I1128 11:24:33.997427 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qvbvn\" (UniqueName: \"kubernetes.io/projected/bd6c3e5b-2eb9-4f4b-8893-07aab7091fab-kube-api-access-qvbvn\") pod \"mariadb-operator-controller-manager-66f4dd4bc7-qc4bb\" (UID: \"bd6c3e5b-2eb9-4f4b-8893-07aab7091fab\") " pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-qc4bb" Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.007517 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-sk9fr" Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.019373 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-j72jx" Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.019775 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-64cdc6ff96-ktqqj"] Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.025689 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-v8vn8" Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.027666 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-6fdcddb789-9z8n7"] Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.045875 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-79556f57fc-vvgng"] Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.046385 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z96zk\" (UniqueName: \"kubernetes.io/projected/5d041894-6ce7-401f-9b0b-5d5a9e31a68d-kube-api-access-z96zk\") pod \"neutron-operator-controller-manager-6fdcddb789-9z8n7\" (UID: \"5d041894-6ce7-401f-9b0b-5d5a9e31a68d\") " pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-9z8n7" Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.046466 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jtq2h\" (UniqueName: \"kubernetes.io/projected/c2fd1946-a3cb-453e-a1f3-458e14cb35ec-kube-api-access-jtq2h\") pod \"octavia-operator-controller-manager-64cdc6ff96-ktqqj\" (UID: \"c2fd1946-a3cb-453e-a1f3-458e14cb35ec\") " pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-ktqqj" Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.046517 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qss75\" (UniqueName: \"kubernetes.io/projected/f9c94487-8e74-4cc0-ac40-834c175a770f-kube-api-access-qss75\") pod \"nova-operator-controller-manager-79556f57fc-vvgng\" (UID: \"f9c94487-8e74-4cc0-ac40-834c175a770f\") " 
pod="openstack-operators/nova-operator-controller-manager-79556f57fc-vvgng" Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.113846 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-qc4bb" Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.114416 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z96zk\" (UniqueName: \"kubernetes.io/projected/5d041894-6ce7-401f-9b0b-5d5a9e31a68d-kube-api-access-z96zk\") pod \"neutron-operator-controller-manager-6fdcddb789-9z8n7\" (UID: \"5d041894-6ce7-401f-9b0b-5d5a9e31a68d\") " pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-9z8n7" Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.124983 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bz5nqx"] Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.129243 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bz5nqx" Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.137611 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-s2bsh" Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.153582 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert" Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.155749 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jtq2h\" (UniqueName: \"kubernetes.io/projected/c2fd1946-a3cb-453e-a1f3-458e14cb35ec-kube-api-access-jtq2h\") pod \"octavia-operator-controller-manager-64cdc6ff96-ktqqj\" (UID: \"c2fd1946-a3cb-453e-a1f3-458e14cb35ec\") " pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-ktqqj" Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.155820 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qss75\" (UniqueName: \"kubernetes.io/projected/f9c94487-8e74-4cc0-ac40-834c175a770f-kube-api-access-qss75\") pod \"nova-operator-controller-manager-79556f57fc-vvgng\" (UID: \"f9c94487-8e74-4cc0-ac40-834c175a770f\") " pod="openstack-operators/nova-operator-controller-manager-79556f57fc-vvgng" Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.184777 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ovn-operator-controller-manager-56897c768d-sv67d"] Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.187518 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-sv67d" Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.198613 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bz5nqx"] Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.222497 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-v4csc" Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.251270 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-56897c768d-sv67d"] Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.255187 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-9z8n7" Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.282544 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/ec4c9bb0-95fa-4840-8b48-de2b822bb788-cert\") pod \"infra-operator-controller-manager-57548d458d-2snwf\" (UID: \"ec4c9bb0-95fa-4840-8b48-de2b822bb788\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-2snwf" Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.282818 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/4c92eac3-127a-4b96-adf3-e3e52ba9015d-cert\") pod \"openstack-baremetal-operator-controller-manager-5fcdb54b6bz5nqx\" (UID: \"4c92eac3-127a-4b96-adf3-e3e52ba9015d\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bz5nqx" Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.282945 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vch2t\" (UniqueName: \"kubernetes.io/projected/4c92eac3-127a-4b96-adf3-e3e52ba9015d-kube-api-access-vch2t\") pod \"openstack-baremetal-operator-controller-manager-5fcdb54b6bz5nqx\" (UID: \"4c92eac3-127a-4b96-adf3-e3e52ba9015d\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bz5nqx" Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.283026 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z6qmp\" (UniqueName: \"kubernetes.io/projected/e00c03c3-987c-4a5a-9c6f-2d15cd86a639-kube-api-access-z6qmp\") pod \"ovn-operator-controller-manager-56897c768d-sv67d\" (UID: \"e00c03c3-987c-4a5a-9c6f-2d15cd86a639\") " pod="openstack-operators/ovn-operator-controller-manager-56897c768d-sv67d" Nov 28 11:24:34 crc kubenswrapper[4923]: E1128 11:24:34.283274 4923 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 28 11:24:34 crc kubenswrapper[4923]: E1128 11:24:34.283387 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ec4c9bb0-95fa-4840-8b48-de2b822bb788-cert podName:ec4c9bb0-95fa-4840-8b48-de2b822bb788 nodeName:}" failed. No retries permitted until 2025-11-28 11:24:35.283370739 +0000 UTC m=+954.412054949 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/ec4c9bb0-95fa-4840-8b48-de2b822bb788-cert") pod "infra-operator-controller-manager-57548d458d-2snwf" (UID: "ec4c9bb0-95fa-4840-8b48-de2b822bb788") : secret "infra-operator-webhook-server-cert" not found Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.283560 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qss75\" (UniqueName: \"kubernetes.io/projected/f9c94487-8e74-4cc0-ac40-834c175a770f-kube-api-access-qss75\") pod \"nova-operator-controller-manager-79556f57fc-vvgng\" (UID: \"f9c94487-8e74-4cc0-ac40-834c175a770f\") " pod="openstack-operators/nova-operator-controller-manager-79556f57fc-vvgng" Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.340113 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-vvgng" Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.344531 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jtq2h\" (UniqueName: \"kubernetes.io/projected/c2fd1946-a3cb-453e-a1f3-458e14cb35ec-kube-api-access-jtq2h\") pod \"octavia-operator-controller-manager-64cdc6ff96-ktqqj\" (UID: \"c2fd1946-a3cb-453e-a1f3-458e14cb35ec\") " pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-ktqqj" Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.374161 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/placement-operator-controller-manager-57988cc5b5-28rch"] Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.375593 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-28rch" Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.387705 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/4c92eac3-127a-4b96-adf3-e3e52ba9015d-cert\") pod \"openstack-baremetal-operator-controller-manager-5fcdb54b6bz5nqx\" (UID: \"4c92eac3-127a-4b96-adf3-e3e52ba9015d\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bz5nqx" Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.387765 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vch2t\" (UniqueName: \"kubernetes.io/projected/4c92eac3-127a-4b96-adf3-e3e52ba9015d-kube-api-access-vch2t\") pod \"openstack-baremetal-operator-controller-manager-5fcdb54b6bz5nqx\" (UID: \"4c92eac3-127a-4b96-adf3-e3e52ba9015d\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bz5nqx" Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.387786 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z6qmp\" (UniqueName: \"kubernetes.io/projected/e00c03c3-987c-4a5a-9c6f-2d15cd86a639-kube-api-access-z6qmp\") pod \"ovn-operator-controller-manager-56897c768d-sv67d\" (UID: \"e00c03c3-987c-4a5a-9c6f-2d15cd86a639\") " pod="openstack-operators/ovn-operator-controller-manager-56897c768d-sv67d" Nov 28 11:24:34 crc kubenswrapper[4923]: E1128 11:24:34.388145 4923 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 28 11:24:34 crc kubenswrapper[4923]: E1128 11:24:34.388203 4923 nestedpendingoperations.go:348] Operation 
for "{volumeName:kubernetes.io/secret/4c92eac3-127a-4b96-adf3-e3e52ba9015d-cert podName:4c92eac3-127a-4b96-adf3-e3e52ba9015d nodeName:}" failed. No retries permitted until 2025-11-28 11:24:34.888190359 +0000 UTC m=+954.016874569 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/4c92eac3-127a-4b96-adf3-e3e52ba9015d-cert") pod "openstack-baremetal-operator-controller-manager-5fcdb54b6bz5nqx" (UID: "4c92eac3-127a-4b96-adf3-e3e52ba9015d") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.393887 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-zhjlq" Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.413709 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z6qmp\" (UniqueName: \"kubernetes.io/projected/e00c03c3-987c-4a5a-9c6f-2d15cd86a639-kube-api-access-z6qmp\") pod \"ovn-operator-controller-manager-56897c768d-sv67d\" (UID: \"e00c03c3-987c-4a5a-9c6f-2d15cd86a639\") " pod="openstack-operators/ovn-operator-controller-manager-56897c768d-sv67d" Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.426205 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-controller-manager-d77b94747-t9tvv"] Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.427487 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-d77b94747-t9tvv" Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.436688 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-4jbhh" Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.438434 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vch2t\" (UniqueName: \"kubernetes.io/projected/4c92eac3-127a-4b96-adf3-e3e52ba9015d-kube-api-access-vch2t\") pod \"openstack-baremetal-operator-controller-manager-5fcdb54b6bz5nqx\" (UID: \"4c92eac3-127a-4b96-adf3-e3e52ba9015d\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bz5nqx" Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.489040 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b47r5\" (UniqueName: \"kubernetes.io/projected/f193a1a9-1c5d-4d16-a9c1-3a17530bed74-kube-api-access-b47r5\") pod \"swift-operator-controller-manager-d77b94747-t9tvv\" (UID: \"f193a1a9-1c5d-4d16-a9c1-3a17530bed74\") " pod="openstack-operators/swift-operator-controller-manager-d77b94747-t9tvv" Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.489367 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7xrdg\" (UniqueName: \"kubernetes.io/projected/b5fe44dd-beba-450d-a04a-59f3046ab0bb-kube-api-access-7xrdg\") pod \"placement-operator-controller-manager-57988cc5b5-28rch\" (UID: \"b5fe44dd-beba-450d-a04a-59f3046ab0bb\") " pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-28rch" Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.493993 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-57988cc5b5-28rch"] Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.501008 4923 kubelet.go:2428] 
"SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-d77b94747-t9tvv"] Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.516715 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-5vgzd"] Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.517818 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-5vgzd" Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.522499 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-dkmrq" Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.590537 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nmwzk\" (UniqueName: \"kubernetes.io/projected/1745a114-0278-4e37-9f5a-34ccaa421f19-kube-api-access-nmwzk\") pod \"telemetry-operator-controller-manager-76cc84c6bb-5vgzd\" (UID: \"1745a114-0278-4e37-9f5a-34ccaa421f19\") " pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-5vgzd" Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.590613 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b47r5\" (UniqueName: \"kubernetes.io/projected/f193a1a9-1c5d-4d16-a9c1-3a17530bed74-kube-api-access-b47r5\") pod \"swift-operator-controller-manager-d77b94747-t9tvv\" (UID: \"f193a1a9-1c5d-4d16-a9c1-3a17530bed74\") " pod="openstack-operators/swift-operator-controller-manager-d77b94747-t9tvv" Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.590673 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7xrdg\" (UniqueName: \"kubernetes.io/projected/b5fe44dd-beba-450d-a04a-59f3046ab0bb-kube-api-access-7xrdg\") pod \"placement-operator-controller-manager-57988cc5b5-28rch\" (UID: \"b5fe44dd-beba-450d-a04a-59f3046ab0bb\") " pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-28rch" Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.597114 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-5vgzd"] Nov 28 11:24:34 crc kubenswrapper[4923]: E1128 11:24:34.593913 4923 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2c55fa30_52ca_462f_871b_85851e250a99.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2c55fa30_52ca_462f_871b_85851e250a99.slice/crio-848ba12c69a9eacbe11ea3490ae46184ddc68ab2e2cfb97289128fabc175b9ac\": RecentStats: unable to find data in memory cache]" Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.613708 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b47r5\" (UniqueName: \"kubernetes.io/projected/f193a1a9-1c5d-4d16-a9c1-3a17530bed74-kube-api-access-b47r5\") pod \"swift-operator-controller-manager-d77b94747-t9tvv\" (UID: \"f193a1a9-1c5d-4d16-a9c1-3a17530bed74\") " pod="openstack-operators/swift-operator-controller-manager-d77b94747-t9tvv" Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.614448 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/watcher-operator-controller-manager-656dcb59d4-646x4"] Nov 
28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.616515 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-646x4" Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.617449 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-ktqqj" Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.619006 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-s6k9g" Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.621838 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7xrdg\" (UniqueName: \"kubernetes.io/projected/b5fe44dd-beba-450d-a04a-59f3046ab0bb-kube-api-access-7xrdg\") pod \"placement-operator-controller-manager-57988cc5b5-28rch\" (UID: \"b5fe44dd-beba-450d-a04a-59f3046ab0bb\") " pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-28rch" Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.624582 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-sv67d" Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.636150 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/test-operator-controller-manager-5cd6c7f4c8-2tvqk"] Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.637379 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-2tvqk" Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.641183 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-pmkvt" Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.647985 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-5cd6c7f4c8-2tvqk"] Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.695012 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lfmvq\" (UniqueName: \"kubernetes.io/projected/4a5bf8f0-87dd-4e04-bd23-8379f541b020-kube-api-access-lfmvq\") pod \"watcher-operator-controller-manager-656dcb59d4-646x4\" (UID: \"4a5bf8f0-87dd-4e04-bd23-8379f541b020\") " pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-646x4" Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.695272 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nmwzk\" (UniqueName: \"kubernetes.io/projected/1745a114-0278-4e37-9f5a-34ccaa421f19-kube-api-access-nmwzk\") pod \"telemetry-operator-controller-manager-76cc84c6bb-5vgzd\" (UID: \"1745a114-0278-4e37-9f5a-34ccaa421f19\") " pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-5vgzd" Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.733203 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nmwzk\" (UniqueName: \"kubernetes.io/projected/1745a114-0278-4e37-9f5a-34ccaa421f19-kube-api-access-nmwzk\") pod \"telemetry-operator-controller-manager-76cc84c6bb-5vgzd\" (UID: \"1745a114-0278-4e37-9f5a-34ccaa421f19\") " pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-5vgzd" Nov 28 11:24:34 crc 
kubenswrapper[4923]: I1128 11:24:34.767973 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-656dcb59d4-646x4"] Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.789907 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-28rch" Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.807545 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lfmvq\" (UniqueName: \"kubernetes.io/projected/4a5bf8f0-87dd-4e04-bd23-8379f541b020-kube-api-access-lfmvq\") pod \"watcher-operator-controller-manager-656dcb59d4-646x4\" (UID: \"4a5bf8f0-87dd-4e04-bd23-8379f541b020\") " pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-646x4" Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.807618 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-664lv\" (UniqueName: \"kubernetes.io/projected/f6fd5062-da63-4fb0-bb4d-80643cb85ca7-kube-api-access-664lv\") pod \"test-operator-controller-manager-5cd6c7f4c8-2tvqk\" (UID: \"f6fd5062-da63-4fb0-bb4d-80643cb85ca7\") " pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-2tvqk" Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.808345 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-n887x"] Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.828981 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-manager-6fbf799579-4qrnz"] Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.829890 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-6fbf799579-4qrnz" Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.839271 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-d77b94747-t9tvv" Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.840230 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"metrics-server-cert" Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.840362 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert" Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.840458 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-mtmmb" Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.863821 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lfmvq\" (UniqueName: \"kubernetes.io/projected/4a5bf8f0-87dd-4e04-bd23-8379f541b020-kube-api-access-lfmvq\") pod \"watcher-operator-controller-manager-656dcb59d4-646x4\" (UID: \"4a5bf8f0-87dd-4e04-bd23-8379f541b020\") " pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-646x4" Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.885977 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-6fbf799579-4qrnz"] Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.886439 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-5vgzd" Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.899676 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-n887x" event={"ID":"6cf5f23b-f4d3-4c24-ac00-26588d4bb3ab","Type":"ContainerStarted","Data":"5c0d3c48d8e89c18967a9bc9f3d9abb9c9c3711474b085318b5e7ed5ab4bde5d"} Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.910518 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/7d3c8ccd-8582-467e-9017-4f08eaac26ab-webhook-certs\") pod \"openstack-operator-controller-manager-6fbf799579-4qrnz\" (UID: \"7d3c8ccd-8582-467e-9017-4f08eaac26ab\") " pod="openstack-operators/openstack-operator-controller-manager-6fbf799579-4qrnz" Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.910579 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/4c92eac3-127a-4b96-adf3-e3e52ba9015d-cert\") pod \"openstack-baremetal-operator-controller-manager-5fcdb54b6bz5nqx\" (UID: \"4c92eac3-127a-4b96-adf3-e3e52ba9015d\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bz5nqx" Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.910605 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d682x\" (UniqueName: \"kubernetes.io/projected/7d3c8ccd-8582-467e-9017-4f08eaac26ab-kube-api-access-d682x\") pod \"openstack-operator-controller-manager-6fbf799579-4qrnz\" (UID: \"7d3c8ccd-8582-467e-9017-4f08eaac26ab\") " pod="openstack-operators/openstack-operator-controller-manager-6fbf799579-4qrnz" Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.910680 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/7d3c8ccd-8582-467e-9017-4f08eaac26ab-metrics-certs\") pod \"openstack-operator-controller-manager-6fbf799579-4qrnz\" (UID: \"7d3c8ccd-8582-467e-9017-4f08eaac26ab\") " pod="openstack-operators/openstack-operator-controller-manager-6fbf799579-4qrnz" Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.910704 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-664lv\" (UniqueName: \"kubernetes.io/projected/f6fd5062-da63-4fb0-bb4d-80643cb85ca7-kube-api-access-664lv\") pod \"test-operator-controller-manager-5cd6c7f4c8-2tvqk\" (UID: \"f6fd5062-da63-4fb0-bb4d-80643cb85ca7\") " pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-2tvqk" Nov 28 11:24:34 crc kubenswrapper[4923]: E1128 11:24:34.911114 4923 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 28 11:24:34 crc kubenswrapper[4923]: E1128 11:24:34.911150 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4c92eac3-127a-4b96-adf3-e3e52ba9015d-cert podName:4c92eac3-127a-4b96-adf3-e3e52ba9015d nodeName:}" failed. No retries permitted until 2025-11-28 11:24:35.911137746 +0000 UTC m=+955.039821956 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/4c92eac3-127a-4b96-adf3-e3e52ba9015d-cert") pod "openstack-baremetal-operator-controller-manager-5fcdb54b6bz5nqx" (UID: "4c92eac3-127a-4b96-adf3-e3e52ba9015d") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.922971 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-zl5xb"] Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.923912 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-zl5xb" Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.930908 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-zl5xb"] Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.934716 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-rcwxk" Nov 28 11:24:34 crc kubenswrapper[4923]: I1128 11:24:34.950896 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-664lv\" (UniqueName: \"kubernetes.io/projected/f6fd5062-da63-4fb0-bb4d-80643cb85ca7-kube-api-access-664lv\") pod \"test-operator-controller-manager-5cd6c7f4c8-2tvqk\" (UID: \"f6fd5062-da63-4fb0-bb4d-80643cb85ca7\") " pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-2tvqk" Nov 28 11:24:35 crc kubenswrapper[4923]: I1128 11:24:35.014367 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4dsww\" (UniqueName: \"kubernetes.io/projected/44d18a2d-97f3-4e4a-82bf-6de8634c7585-kube-api-access-4dsww\") pod \"rabbitmq-cluster-operator-manager-668c99d594-zl5xb\" (UID: \"44d18a2d-97f3-4e4a-82bf-6de8634c7585\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-zl5xb" Nov 28 11:24:35 crc kubenswrapper[4923]: I1128 11:24:35.014411 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/7d3c8ccd-8582-467e-9017-4f08eaac26ab-metrics-certs\") pod \"openstack-operator-controller-manager-6fbf799579-4qrnz\" (UID: \"7d3c8ccd-8582-467e-9017-4f08eaac26ab\") " pod="openstack-operators/openstack-operator-controller-manager-6fbf799579-4qrnz" Nov 28 11:24:35 crc kubenswrapper[4923]: I1128 11:24:35.014465 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/7d3c8ccd-8582-467e-9017-4f08eaac26ab-webhook-certs\") pod \"openstack-operator-controller-manager-6fbf799579-4qrnz\" (UID: \"7d3c8ccd-8582-467e-9017-4f08eaac26ab\") " pod="openstack-operators/openstack-operator-controller-manager-6fbf799579-4qrnz" Nov 28 11:24:35 crc kubenswrapper[4923]: I1128 11:24:35.014504 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d682x\" (UniqueName: \"kubernetes.io/projected/7d3c8ccd-8582-467e-9017-4f08eaac26ab-kube-api-access-d682x\") pod \"openstack-operator-controller-manager-6fbf799579-4qrnz\" (UID: \"7d3c8ccd-8582-467e-9017-4f08eaac26ab\") " pod="openstack-operators/openstack-operator-controller-manager-6fbf799579-4qrnz" Nov 28 11:24:35 crc kubenswrapper[4923]: E1128 11:24:35.014531 4923 secret.go:188] Couldn't get secret 
openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 28 11:24:35 crc kubenswrapper[4923]: E1128 11:24:35.014592 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/7d3c8ccd-8582-467e-9017-4f08eaac26ab-metrics-certs podName:7d3c8ccd-8582-467e-9017-4f08eaac26ab nodeName:}" failed. No retries permitted until 2025-11-28 11:24:35.514575727 +0000 UTC m=+954.643259937 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/7d3c8ccd-8582-467e-9017-4f08eaac26ab-metrics-certs") pod "openstack-operator-controller-manager-6fbf799579-4qrnz" (UID: "7d3c8ccd-8582-467e-9017-4f08eaac26ab") : secret "metrics-server-cert" not found Nov 28 11:24:35 crc kubenswrapper[4923]: E1128 11:24:35.014711 4923 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 28 11:24:35 crc kubenswrapper[4923]: E1128 11:24:35.014732 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/7d3c8ccd-8582-467e-9017-4f08eaac26ab-webhook-certs podName:7d3c8ccd-8582-467e-9017-4f08eaac26ab nodeName:}" failed. No retries permitted until 2025-11-28 11:24:35.514725811 +0000 UTC m=+954.643410021 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/7d3c8ccd-8582-467e-9017-4f08eaac26ab-webhook-certs") pod "openstack-operator-controller-manager-6fbf799579-4qrnz" (UID: "7d3c8ccd-8582-467e-9017-4f08eaac26ab") : secret "webhook-server-cert" not found Nov 28 11:24:35 crc kubenswrapper[4923]: I1128 11:24:35.019485 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-589cbd6b5b-kmtzj"] Nov 28 11:24:35 crc kubenswrapper[4923]: I1128 11:24:35.021619 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-646x4" Nov 28 11:24:35 crc kubenswrapper[4923]: I1128 11:24:35.034095 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-2tvqk" Nov 28 11:24:35 crc kubenswrapper[4923]: I1128 11:24:35.035234 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d682x\" (UniqueName: \"kubernetes.io/projected/7d3c8ccd-8582-467e-9017-4f08eaac26ab-kube-api-access-d682x\") pod \"openstack-operator-controller-manager-6fbf799579-4qrnz\" (UID: \"7d3c8ccd-8582-467e-9017-4f08eaac26ab\") " pod="openstack-operators/openstack-operator-controller-manager-6fbf799579-4qrnz" Nov 28 11:24:35 crc kubenswrapper[4923]: I1128 11:24:35.169835 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4dsww\" (UniqueName: \"kubernetes.io/projected/44d18a2d-97f3-4e4a-82bf-6de8634c7585-kube-api-access-4dsww\") pod \"rabbitmq-cluster-operator-manager-668c99d594-zl5xb\" (UID: \"44d18a2d-97f3-4e4a-82bf-6de8634c7585\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-zl5xb" Nov 28 11:24:35 crc kubenswrapper[4923]: I1128 11:24:35.203491 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4dsww\" (UniqueName: \"kubernetes.io/projected/44d18a2d-97f3-4e4a-82bf-6de8634c7585-kube-api-access-4dsww\") pod \"rabbitmq-cluster-operator-manager-668c99d594-zl5xb\" (UID: \"44d18a2d-97f3-4e4a-82bf-6de8634c7585\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-zl5xb" Nov 28 11:24:35 crc kubenswrapper[4923]: I1128 11:24:35.375776 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/ec4c9bb0-95fa-4840-8b48-de2b822bb788-cert\") pod \"infra-operator-controller-manager-57548d458d-2snwf\" (UID: \"ec4c9bb0-95fa-4840-8b48-de2b822bb788\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-2snwf" Nov 28 11:24:35 crc kubenswrapper[4923]: E1128 11:24:35.375956 4923 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 28 11:24:35 crc kubenswrapper[4923]: E1128 11:24:35.376025 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ec4c9bb0-95fa-4840-8b48-de2b822bb788-cert podName:ec4c9bb0-95fa-4840-8b48-de2b822bb788 nodeName:}" failed. No retries permitted until 2025-11-28 11:24:37.376008408 +0000 UTC m=+956.504692618 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/ec4c9bb0-95fa-4840-8b48-de2b822bb788-cert") pod "infra-operator-controller-manager-57548d458d-2snwf" (UID: "ec4c9bb0-95fa-4840-8b48-de2b822bb788") : secret "infra-operator-webhook-server-cert" not found Nov 28 11:24:35 crc kubenswrapper[4923]: I1128 11:24:35.412033 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-zl5xb" Nov 28 11:24:35 crc kubenswrapper[4923]: I1128 11:24:35.483702 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-5d494799bf-98w4g"] Nov 28 11:24:35 crc kubenswrapper[4923]: I1128 11:24:35.512737 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-955677c94-8ftq2"] Nov 28 11:24:35 crc kubenswrapper[4923]: I1128 11:24:35.537383 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-5b77f656f-jtpmz"] Nov 28 11:24:35 crc kubenswrapper[4923]: W1128 11:24:35.548020 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2ea0b3c3_a5d4_4b2f_81ef_a52573d37e06.slice/crio-b1f620518d84a73bef6064f7d4d57b57433db4b6a01db423bddd491d38cd940d WatchSource:0}: Error finding container b1f620518d84a73bef6064f7d4d57b57433db4b6a01db423bddd491d38cd940d: Status 404 returned error can't find the container with id b1f620518d84a73bef6064f7d4d57b57433db4b6a01db423bddd491d38cd940d Nov 28 11:24:35 crc kubenswrapper[4923]: I1128 11:24:35.578749 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/7d3c8ccd-8582-467e-9017-4f08eaac26ab-metrics-certs\") pod \"openstack-operator-controller-manager-6fbf799579-4qrnz\" (UID: \"7d3c8ccd-8582-467e-9017-4f08eaac26ab\") " pod="openstack-operators/openstack-operator-controller-manager-6fbf799579-4qrnz" Nov 28 11:24:35 crc kubenswrapper[4923]: I1128 11:24:35.578806 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/7d3c8ccd-8582-467e-9017-4f08eaac26ab-webhook-certs\") pod \"openstack-operator-controller-manager-6fbf799579-4qrnz\" (UID: \"7d3c8ccd-8582-467e-9017-4f08eaac26ab\") " pod="openstack-operators/openstack-operator-controller-manager-6fbf799579-4qrnz" Nov 28 11:24:35 crc kubenswrapper[4923]: E1128 11:24:35.578925 4923 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 28 11:24:35 crc kubenswrapper[4923]: E1128 11:24:35.578992 4923 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 28 11:24:35 crc kubenswrapper[4923]: E1128 11:24:35.579009 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/7d3c8ccd-8582-467e-9017-4f08eaac26ab-metrics-certs podName:7d3c8ccd-8582-467e-9017-4f08eaac26ab nodeName:}" failed. No retries permitted until 2025-11-28 11:24:36.578991611 +0000 UTC m=+955.707675821 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/7d3c8ccd-8582-467e-9017-4f08eaac26ab-metrics-certs") pod "openstack-operator-controller-manager-6fbf799579-4qrnz" (UID: "7d3c8ccd-8582-467e-9017-4f08eaac26ab") : secret "metrics-server-cert" not found Nov 28 11:24:35 crc kubenswrapper[4923]: E1128 11:24:35.579096 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/7d3c8ccd-8582-467e-9017-4f08eaac26ab-webhook-certs podName:7d3c8ccd-8582-467e-9017-4f08eaac26ab nodeName:}" failed. No retries permitted until 2025-11-28 11:24:36.579079043 +0000 UTC m=+955.707763253 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/7d3c8ccd-8582-467e-9017-4f08eaac26ab-webhook-certs") pod "openstack-operator-controller-manager-6fbf799579-4qrnz" (UID: "7d3c8ccd-8582-467e-9017-4f08eaac26ab") : secret "webhook-server-cert" not found Nov 28 11:24:35 crc kubenswrapper[4923]: I1128 11:24:35.627810 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-qc4bb"] Nov 28 11:24:35 crc kubenswrapper[4923]: I1128 11:24:35.640743 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-79556f57fc-vvgng"] Nov 28 11:24:35 crc kubenswrapper[4923]: W1128 11:24:35.649374 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf9c94487_8e74_4cc0_ac40_834c175a770f.slice/crio-7314f9ea699e198b818946089fc48643caa205989c0119e84bc03ac146b24dc4 WatchSource:0}: Error finding container 7314f9ea699e198b818946089fc48643caa205989c0119e84bc03ac146b24dc4: Status 404 returned error can't find the container with id 7314f9ea699e198b818946089fc48643caa205989c0119e84bc03ac146b24dc4 Nov 28 11:24:35 crc kubenswrapper[4923]: I1128 11:24:35.650832 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-6fdcddb789-9z8n7"] Nov 28 11:24:35 crc kubenswrapper[4923]: I1128 11:24:35.662922 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7b64f4fb85-kbm2r"] Nov 28 11:24:35 crc kubenswrapper[4923]: I1128 11:24:35.669231 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-6b7f75547b-p9k2s"] Nov 28 11:24:35 crc kubenswrapper[4923]: I1128 11:24:35.672832 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7b4567c7cf-sk9fr"] Nov 28 11:24:35 crc kubenswrapper[4923]: W1128 11:24:35.688853 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc434efb7_70cf_4c94_be0d_9635325d758c.slice/crio-6d78aed744825c23f411678b5a948b5cca56f497ddf6aeaafe5af445aae1ff49 WatchSource:0}: Error finding container 6d78aed744825c23f411678b5a948b5cca56f497ddf6aeaafe5af445aae1ff49: Status 404 returned error can't find the container with id 6d78aed744825c23f411678b5a948b5cca56f497ddf6aeaafe5af445aae1ff49 Nov 28 11:24:35 crc kubenswrapper[4923]: I1128 11:24:35.826045 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-j72jx"] Nov 28 11:24:35 crc kubenswrapper[4923]: W1128 11:24:35.834922 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod52b8b64e_5401_41ef_8d65_cc275cdaf832.slice/crio-bc2dc501c2a360ff50f425e0d0c1c1f70da4bd619e7f8bb03699128bb0a8fb32 WatchSource:0}: Error finding container bc2dc501c2a360ff50f425e0d0c1c1f70da4bd619e7f8bb03699128bb0a8fb32: Status 404 returned error can't find the container with id bc2dc501c2a360ff50f425e0d0c1c1f70da4bd619e7f8bb03699128bb0a8fb32 Nov 28 11:24:35 crc kubenswrapper[4923]: W1128 11:24:35.845465 4923 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode00c03c3_987c_4a5a_9c6f_2d15cd86a639.slice/crio-4ae3c17573021fb67ef5e326f1985a062835981bfc1bf395ce544120b41c873a WatchSource:0}: Error finding container 4ae3c17573021fb67ef5e326f1985a062835981bfc1bf395ce544120b41c873a: Status 404 returned error can't find the container with id 4ae3c17573021fb67ef5e326f1985a062835981bfc1bf395ce544120b41c873a Nov 28 11:24:35 crc kubenswrapper[4923]: I1128 11:24:35.845689 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-56897c768d-sv67d"] Nov 28 11:24:35 crc kubenswrapper[4923]: W1128 11:24:35.846564 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf193a1a9_1c5d_4d16_a9c1_3a17530bed74.slice/crio-5062b28b2acbca54904e7661db58eb6fe6be20008a55046d4b83a78c0c876aa3 WatchSource:0}: Error finding container 5062b28b2acbca54904e7661db58eb6fe6be20008a55046d4b83a78c0c876aa3: Status 404 returned error can't find the container with id 5062b28b2acbca54904e7661db58eb6fe6be20008a55046d4b83a78c0c876aa3 Nov 28 11:24:35 crc kubenswrapper[4923]: E1128 11:24:35.850122 4923 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/swift-operator@sha256:72236301580ff9080f7e311b832d7ba66666a9afeda51f969745229624ff26e4,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-b47r5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod 
swift-operator-controller-manager-d77b94747-t9tvv_openstack-operators(f193a1a9-1c5d-4d16-a9c1-3a17530bed74): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 28 11:24:35 crc kubenswrapper[4923]: I1128 11:24:35.850147 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-d77b94747-t9tvv"] Nov 28 11:24:35 crc kubenswrapper[4923]: E1128 11:24:35.851827 4923 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-b47r5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-d77b94747-t9tvv_openstack-operators(f193a1a9-1c5d-4d16-a9c1-3a17530bed74): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 28 11:24:35 crc kubenswrapper[4923]: E1128 11:24:35.853620 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/swift-operator-controller-manager-d77b94747-t9tvv" podUID="f193a1a9-1c5d-4d16-a9c1-3a17530bed74" Nov 28 11:24:35 crc kubenswrapper[4923]: I1128 11:24:35.859048 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-5d499bf58b-v8vn8"] Nov 28 11:24:35 crc kubenswrapper[4923]: W1128 11:24:35.861463 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poded2b1137_a903_4224_b706_304a2f416007.slice/crio-c7ccdae559d90dd393d717dda17a0c7567413de657d71e7d30f4f042a0856ed3 WatchSource:0}: Error finding container c7ccdae559d90dd393d717dda17a0c7567413de657d71e7d30f4f042a0856ed3: Status 404 returned error can't find the container with id c7ccdae559d90dd393d717dda17a0c7567413de657d71e7d30f4f042a0856ed3 Nov 28 11:24:35 crc kubenswrapper[4923]: E1128 11:24:35.863973 4923 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:manager,Image:quay.io/openstack-k8s-operators/manila-operator@sha256:89910bc3ecceb7590d3207ac294eb7354de358cf39ef03c72323b26c598e50e6,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-dhxn8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod manila-operator-controller-manager-5d499bf58b-v8vn8_openstack-operators(ed2b1137-a903-4224-b706-304a2f416007): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 28 11:24:35 crc kubenswrapper[4923]: E1128 11:24:35.866192 4923 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-dhxn8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod manila-operator-controller-manager-5d499bf58b-v8vn8_openstack-operators(ed2b1137-a903-4224-b706-304a2f416007): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Nov 28 11:24:35 crc kubenswrapper[4923]: E1128 11:24:35.867447 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-v8vn8" podUID="ed2b1137-a903-4224-b706-304a2f416007"
Nov 28 11:24:35 crc kubenswrapper[4923]: I1128 11:24:35.914804 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-qc4bb" event={"ID":"bd6c3e5b-2eb9-4f4b-8893-07aab7091fab","Type":"ContainerStarted","Data":"9030458cc07cc293145ee47ecc193015d3f105550c7b0f84b5b70537d4266661"}
Nov 28 11:24:35 crc kubenswrapper[4923]: I1128 11:24:35.916410 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-j72jx" event={"ID":"52b8b64e-5401-41ef-8d65-cc275cdaf832","Type":"ContainerStarted","Data":"bc2dc501c2a360ff50f425e0d0c1c1f70da4bd619e7f8bb03699128bb0a8fb32"}
Nov 28 11:24:35 crc kubenswrapper[4923]: I1128 11:24:35.917767 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-955677c94-8ftq2" event={"ID":"e469dd36-fba4-4342-8fd6-ef847f821393","Type":"ContainerStarted","Data":"24b24236a7b6cac602c3ca2d3048bfcb58eaeb27e6a114b0a65b39511122ab81"}
Nov 28 11:24:35 crc kubenswrapper[4923]: I1128 11:24:35.918601 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-vvgng" event={"ID":"f9c94487-8e74-4cc0-ac40-834c175a770f","Type":"ContainerStarted","Data":"7314f9ea699e198b818946089fc48643caa205989c0119e84bc03ac146b24dc4"}
Nov 28 11:24:35 crc kubenswrapper[4923]: I1128 11:24:35.919439 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-p9k2s" event={"ID":"09a1376c-00d7-4540-a905-078c297241cb","Type":"ContainerStarted","Data":"91aaae5f5caf88177142096bc804b464c8069f3cf070c55f8eade60a74196efc"}
Nov 28 11:24:35 crc kubenswrapper[4923]: I1128 11:24:35.920623 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-jtpmz" event={"ID":"2ea0b3c3-a5d4-4b2f-81ef-a52573d37e06","Type":"ContainerStarted","Data":"b1f620518d84a73bef6064f7d4d57b57433db4b6a01db423bddd491d38cd940d"}
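The repeated "pull QPS exceeded" here is kubelet-side throttling, not a registry failure: image pulls pass through a token-bucket rate limiter governed by the KubeletConfiguration fields registryPullQPS and registryBurst (upstream defaults 5/s and 10), and scheduling roughly twenty operator pods at once drains the bucket, so the remaining pulls fail immediately with ErrImagePull and the pods fall into ImagePullBackOff. A small sketch of the effect, using golang.org/x/time/rate as a stand-in for the kubelet's internal limiter; the 5/10 values are the upstream defaults, assumed rather than read from this node's config:

    package main

    import (
        "fmt"

        "golang.org/x/time/rate"
    )

    func main() {
        // Token bucket: refill 5 tokens/s, hold at most 10 (the assumed defaults).
        limiter := rate.NewLimiter(rate.Limit(5), 10)
        // Twenty near-simultaneous pulls, as when many operator pods land at once:
        // roughly the first ten drain the burst, the rest are rejected outright.
        for i := 1; i <= 20; i++ {
            if limiter.Allow() {
                fmt.Printf("image pull %2d: started\n", i)
            } else {
                // This is what surfaces above as ErrImagePull: "pull QPS exceeded".
                fmt.Printf("image pull %2d: pull QPS exceeded\n", i)
            }
        }
    }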
event={"ID":"2ea0b3c3-a5d4-4b2f-81ef-a52573d37e06","Type":"ContainerStarted","Data":"b1f620518d84a73bef6064f7d4d57b57433db4b6a01db423bddd491d38cd940d"} Nov 28 11:24:35 crc kubenswrapper[4923]: I1128 11:24:35.921277 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-9z8n7" event={"ID":"5d041894-6ce7-401f-9b0b-5d5a9e31a68d","Type":"ContainerStarted","Data":"971d2d63e8e476d482ce56e0a4e32bf48737739f3871701fdf0723d3f5d97600"} Nov 28 11:24:35 crc kubenswrapper[4923]: I1128 11:24:35.921858 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-kbm2r" event={"ID":"eb007735-97dd-4d13-9b3d-28adefb557e1","Type":"ContainerStarted","Data":"7fa5f942c2d52c6fd18ca916c213b688a075fc7f955482f3c5506b164174e437"} Nov 28 11:24:35 crc kubenswrapper[4923]: I1128 11:24:35.922781 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-v8vn8" event={"ID":"ed2b1137-a903-4224-b706-304a2f416007","Type":"ContainerStarted","Data":"c7ccdae559d90dd393d717dda17a0c7567413de657d71e7d30f4f042a0856ed3"} Nov 28 11:24:35 crc kubenswrapper[4923]: I1128 11:24:35.927947 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-98w4g" event={"ID":"a813ab03-9734-4a76-aef0-62c7606c85d5","Type":"ContainerStarted","Data":"bc8eb50e0f90c366c44064ba5b2b126616907711b104f9bb795d446369e571ac"} Nov 28 11:24:35 crc kubenswrapper[4923]: E1128 11:24:35.928445 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/manila-operator@sha256:89910bc3ecceb7590d3207ac294eb7354de358cf39ef03c72323b26c598e50e6\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-v8vn8" podUID="ed2b1137-a903-4224-b706-304a2f416007" Nov 28 11:24:35 crc kubenswrapper[4923]: I1128 11:24:35.930967 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-kmtzj" event={"ID":"2ef6846b-733b-4c63-8add-5c3251658a7e","Type":"ContainerStarted","Data":"9be3bcae73f7b735664af720c14206ce1e92f9326b093daa7995728f6a56ab36"} Nov 28 11:24:35 crc kubenswrapper[4923]: I1128 11:24:35.932550 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-sv67d" event={"ID":"e00c03c3-987c-4a5a-9c6f-2d15cd86a639","Type":"ContainerStarted","Data":"4ae3c17573021fb67ef5e326f1985a062835981bfc1bf395ce544120b41c873a"} Nov 28 11:24:35 crc kubenswrapper[4923]: I1128 11:24:35.944885 4923 generic.go:334] "Generic (PLEG): container finished" podID="6cf5f23b-f4d3-4c24-ac00-26588d4bb3ab" containerID="0fbffee5c0cc9f70f29b58dd17a0d0b931279a175aa3107a205b61bfa1a8fb35" exitCode=0 Nov 28 11:24:35 crc kubenswrapper[4923]: I1128 11:24:35.945402 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-n887x" event={"ID":"6cf5f23b-f4d3-4c24-ac00-26588d4bb3ab","Type":"ContainerDied","Data":"0fbffee5c0cc9f70f29b58dd17a0d0b931279a175aa3107a205b61bfa1a8fb35"} Nov 28 11:24:35 crc kubenswrapper[4923]: I1128 11:24:35.947043 4923 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-sk9fr" event={"ID":"c434efb7-70cf-4c94-be0d-9635325d758c","Type":"ContainerStarted","Data":"6d78aed744825c23f411678b5a948b5cca56f497ddf6aeaafe5af445aae1ff49"} Nov 28 11:24:35 crc kubenswrapper[4923]: I1128 11:24:35.949710 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-d77b94747-t9tvv" event={"ID":"f193a1a9-1c5d-4d16-a9c1-3a17530bed74","Type":"ContainerStarted","Data":"5062b28b2acbca54904e7661db58eb6fe6be20008a55046d4b83a78c0c876aa3"} Nov 28 11:24:35 crc kubenswrapper[4923]: E1128 11:24:35.958223 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:72236301580ff9080f7e311b832d7ba66666a9afeda51f969745229624ff26e4\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/swift-operator-controller-manager-d77b94747-t9tvv" podUID="f193a1a9-1c5d-4d16-a9c1-3a17530bed74" Nov 28 11:24:35 crc kubenswrapper[4923]: I1128 11:24:35.984166 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/4c92eac3-127a-4b96-adf3-e3e52ba9015d-cert\") pod \"openstack-baremetal-operator-controller-manager-5fcdb54b6bz5nqx\" (UID: \"4c92eac3-127a-4b96-adf3-e3e52ba9015d\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bz5nqx" Nov 28 11:24:35 crc kubenswrapper[4923]: E1128 11:24:35.984319 4923 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 28 11:24:35 crc kubenswrapper[4923]: E1128 11:24:35.984370 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4c92eac3-127a-4b96-adf3-e3e52ba9015d-cert podName:4c92eac3-127a-4b96-adf3-e3e52ba9015d nodeName:}" failed. No retries permitted until 2025-11-28 11:24:37.984356759 +0000 UTC m=+957.113040969 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/4c92eac3-127a-4b96-adf3-e3e52ba9015d-cert") pod "openstack-baremetal-operator-controller-manager-5fcdb54b6bz5nqx" (UID: "4c92eac3-127a-4b96-adf3-e3e52ba9015d") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 28 11:24:35 crc kubenswrapper[4923]: I1128 11:24:35.997025 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-64cdc6ff96-ktqqj"] Nov 28 11:24:36 crc kubenswrapper[4923]: I1128 11:24:36.020770 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-5vgzd"] Nov 28 11:24:36 crc kubenswrapper[4923]: E1128 11:24:36.023150 4923 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/octavia-operator@sha256:ddc8a82f05930db8ee7a8d6d189b5a66373060656e4baf71ac302f89c477da4c,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-jtq2h,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod octavia-operator-controller-manager-64cdc6ff96-ktqqj_openstack-operators(c2fd1946-a3cb-453e-a1f3-458e14cb35ec): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 28 11:24:36 crc kubenswrapper[4923]: E1128 11:24:36.026087 4923 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 
--upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-jtq2h,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod octavia-operator-controller-manager-64cdc6ff96-ktqqj_openstack-operators(c2fd1946-a3cb-453e-a1f3-458e14cb35ec): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 28 11:24:36 crc kubenswrapper[4923]: E1128 11:24:36.027365 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-ktqqj" podUID="c2fd1946-a3cb-453e-a1f3-458e14cb35ec" Nov 28 11:24:36 crc kubenswrapper[4923]: I1128 11:24:36.037087 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-57988cc5b5-28rch"] Nov 28 11:24:36 crc kubenswrapper[4923]: E1128 11:24:36.051759 4923 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/telemetry-operator@sha256:7d66757c0af67104f0389e851a7cc0daa44443ad202d157417bd86bbb57cc385,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-nmwzk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod telemetry-operator-controller-manager-76cc84c6bb-5vgzd_openstack-operators(1745a114-0278-4e37-9f5a-34ccaa421f19): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 28 11:24:36 crc kubenswrapper[4923]: E1128 11:24:36.054686 4923 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/watcher-operator@sha256:6bed55b172b9ee8ccc3952cbfc543d8bd44e2690f6db94348a754152fd78f4cf,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-lfmvq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-656dcb59d4-646x4_openstack-operators(4a5bf8f0-87dd-4e04-bd23-8379f541b020): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 28 11:24:36 crc kubenswrapper[4923]: E1128 11:24:36.055436 4923 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-nmwzk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod telemetry-operator-controller-manager-76cc84c6bb-5vgzd_openstack-operators(1745a114-0278-4e37-9f5a-34ccaa421f19): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 28 11:24:36 crc kubenswrapper[4923]: E1128 11:24:36.057595 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-5vgzd" podUID="1745a114-0278-4e37-9f5a-34ccaa421f19" Nov 28 11:24:36 crc kubenswrapper[4923]: E1128 11:24:36.058663 4923 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true 
--v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-lfmvq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-656dcb59d4-646x4_openstack-operators(4a5bf8f0-87dd-4e04-bd23-8379f541b020): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 28 11:24:36 crc kubenswrapper[4923]: E1128 11:24:36.059975 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-646x4" podUID="4a5bf8f0-87dd-4e04-bd23-8379f541b020" Nov 28 11:24:36 crc kubenswrapper[4923]: W1128 11:24:36.064369 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb5fe44dd_beba_450d_a04a_59f3046ab0bb.slice/crio-711735680bd56aa1e2771bb06dc136807d9001e6db07ee432f9151d0c5c85936 WatchSource:0}: Error finding container 711735680bd56aa1e2771bb06dc136807d9001e6db07ee432f9151d0c5c85936: Status 404 returned error can't find the container with id 711735680bd56aa1e2771bb06dc136807d9001e6db07ee432f9151d0c5c85936 Nov 28 11:24:36 crc kubenswrapper[4923]: I1128 11:24:36.064421 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-656dcb59d4-646x4"] Nov 28 11:24:36 crc kubenswrapper[4923]: W1128 11:24:36.075054 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod44d18a2d_97f3_4e4a_82bf_6de8634c7585.slice/crio-a0357445ffa248dd2accf7954fab0c9a55794793f9ca2b9c92da6d7fe7698fb2 WatchSource:0}: Error finding container a0357445ffa248dd2accf7954fab0c9a55794793f9ca2b9c92da6d7fe7698fb2: Status 404 returned error can't find the container with id a0357445ffa248dd2accf7954fab0c9a55794793f9ca2b9c92da6d7fe7698fb2 Nov 28 11:24:36 crc kubenswrapper[4923]: E1128 11:24:36.077865 4923 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/test-operator@sha256:210517b918e30df1c95fc7d961c8e57e9a9d1cc2b9fe7eb4dad2034dd53a90aa,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 
--metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-664lv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-5cd6c7f4c8-2tvqk_openstack-operators(f6fd5062-da63-4fb0-bb4d-80643cb85ca7): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 28 11:24:36 crc kubenswrapper[4923]: E1128 11:24:36.080128 4923 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-4dsww,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-668c99d594-zl5xb_openstack-operators(44d18a2d-97f3-4e4a-82bf-6de8634c7585): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Nov 28 11:24:36 crc kubenswrapper[4923]: E1128 11:24:36.080209 4923 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-664lv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-5cd6c7f4c8-2tvqk_openstack-operators(f6fd5062-da63-4fb0-bb4d-80643cb85ca7): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Nov 28 11:24:36 crc kubenswrapper[4923]: E1128 11:24:36.080284 4923 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/placement-operator@sha256:225958f250a1075b69439d776a13acc45c78695c21abda23600fb53ca1640423,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-7xrdg,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-operator-controller-manager-57988cc5b5-28rch_openstack-operators(b5fe44dd-beba-450d-a04a-59f3046ab0bb): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Nov 28 11:24:36 crc kubenswrapper[4923]: I1128 11:24:36.080349 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-5cd6c7f4c8-2tvqk"]
Nov 28 11:24:36 crc kubenswrapper[4923]: E1128 11:24:36.081798 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-zl5xb" podUID="44d18a2d-97f3-4e4a-82bf-6de8634c7585"
Nov 28 11:24:36 crc kubenswrapper[4923]: E1128 11:24:36.081842 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-2tvqk" podUID="f6fd5062-da63-4fb0-bb4d-80643cb85ca7"
Nov 28 11:24:36 crc kubenswrapper[4923]: E1128 11:24:36.082278 4923 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-7xrdg,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-operator-controller-manager-57988cc5b5-28rch_openstack-operators(b5fe44dd-beba-450d-a04a-59f3046ab0bb): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Nov 28 11:24:36 crc kubenswrapper[4923]: E1128 11:24:36.084147 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-28rch" podUID="b5fe44dd-beba-450d-a04a-59f3046ab0bb"
Nov 28 11:24:36 crc kubenswrapper[4923]: I1128 11:24:36.087508 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-zl5xb"]
Nov 28 11:24:36 crc kubenswrapper[4923]: E1128 11:24:36.593285 4923 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found
Nov 28 11:24:36 crc kubenswrapper[4923]: E1128 11:24:36.593348 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/7d3c8ccd-8582-467e-9017-4f08eaac26ab-webhook-certs podName:7d3c8ccd-8582-467e-9017-4f08eaac26ab nodeName:}" failed. No retries permitted until 2025-11-28 11:24:38.593326507 +0000 UTC m=+957.722010717 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/7d3c8ccd-8582-467e-9017-4f08eaac26ab-webhook-certs") pod "openstack-operator-controller-manager-6fbf799579-4qrnz" (UID: "7d3c8ccd-8582-467e-9017-4f08eaac26ab") : secret "webhook-server-cert" not found
Nov 28 11:24:36 crc kubenswrapper[4923]: I1128 11:24:36.593627 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/7d3c8ccd-8582-467e-9017-4f08eaac26ab-webhook-certs\") pod \"openstack-operator-controller-manager-6fbf799579-4qrnz\" (UID: \"7d3c8ccd-8582-467e-9017-4f08eaac26ab\") " pod="openstack-operators/openstack-operator-controller-manager-6fbf799579-4qrnz"
Nov 28 11:24:36 crc kubenswrapper[4923]: I1128 11:24:36.593818 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/7d3c8ccd-8582-467e-9017-4f08eaac26ab-metrics-certs\") pod \"openstack-operator-controller-manager-6fbf799579-4qrnz\" (UID: \"7d3c8ccd-8582-467e-9017-4f08eaac26ab\") " pod="openstack-operators/openstack-operator-controller-manager-6fbf799579-4qrnz"
Nov 28 11:24:36 crc kubenswrapper[4923]: E1128 11:24:36.593891 4923 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found
Nov 28 11:24:36 crc kubenswrapper[4923]: E1128 11:24:36.594043 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/7d3c8ccd-8582-467e-9017-4f08eaac26ab-metrics-certs podName:7d3c8ccd-8582-467e-9017-4f08eaac26ab nodeName:}" failed. No retries permitted until 2025-11-28 11:24:38.594015056 +0000 UTC m=+957.722699266 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/7d3c8ccd-8582-467e-9017-4f08eaac26ab-metrics-certs") pod "openstack-operator-controller-manager-6fbf799579-4qrnz" (UID: "7d3c8ccd-8582-467e-9017-4f08eaac26ab") : secret "metrics-server-cert" not found
Nov 28 11:24:36 crc kubenswrapper[4923]: I1128 11:24:36.981715 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-zl5xb" event={"ID":"44d18a2d-97f3-4e4a-82bf-6de8634c7585","Type":"ContainerStarted","Data":"a0357445ffa248dd2accf7954fab0c9a55794793f9ca2b9c92da6d7fe7698fb2"}
Nov 28 11:24:36 crc kubenswrapper[4923]: I1128 11:24:36.992489 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-646x4" event={"ID":"4a5bf8f0-87dd-4e04-bd23-8379f541b020","Type":"ContainerStarted","Data":"d678bc77a828dcbcc1d1ef268b41cb3197beff06b770821c2895944717dc6d53"}
Nov 28 11:24:37 crc kubenswrapper[4923]: I1128 11:24:37.007227 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-2tvqk" event={"ID":"f6fd5062-da63-4fb0-bb4d-80643cb85ca7","Type":"ContainerStarted","Data":"a548a4d257dea9b97c43ed1c562c0bf824770ff1fae25ef2353b8850f40ee155"}
Nov 28 11:24:37 crc kubenswrapper[4923]: I1128 11:24:37.022475 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-5vgzd" event={"ID":"1745a114-0278-4e37-9f5a-34ccaa421f19","Type":"ContainerStarted","Data":"ada79e2b88c401cec7d4086ae5ce58fb41e79843f7a54c2e5d41135b34c7c9d1"}
Nov 28 11:24:37 crc kubenswrapper[4923]: E1128 11:24:37.029062 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-zl5xb" podUID="44d18a2d-97f3-4e4a-82bf-6de8634c7585"
Nov 28 11:24:37 crc kubenswrapper[4923]: I1128 11:24:37.030664 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-ktqqj" event={"ID":"c2fd1946-a3cb-453e-a1f3-458e14cb35ec","Type":"ContainerStarted","Data":"5e933fe62ed0f3273a3546a11709e47c35f54a5dce3c92aa66dfb344760639c1"}
Nov 28 11:24:37 crc kubenswrapper[4923]: E1128 11:24:37.037352 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:210517b918e30df1c95fc7d961c8e57e9a9d1cc2b9fe7eb4dad2034dd53a90aa\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-2tvqk" podUID="f6fd5062-da63-4fb0-bb4d-80643cb85ca7"
Nov 28 11:24:37 crc kubenswrapper[4923]: E1128 11:24:37.037409 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:6bed55b172b9ee8ccc3952cbfc543d8bd44e2690f6db94348a754152fd78f4cf\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-646x4" podUID="4a5bf8f0-87dd-4e04-bd23-8379f541b020"
Nov 28 11:24:37 crc kubenswrapper[4923]: E1128 11:24:37.037484 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/telemetry-operator@sha256:7d66757c0af67104f0389e851a7cc0daa44443ad202d157417bd86bbb57cc385\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-5vgzd" podUID="1745a114-0278-4e37-9f5a-34ccaa421f19"
Nov 28 11:24:37 crc kubenswrapper[4923]: E1128 11:24:37.041834 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/octavia-operator@sha256:ddc8a82f05930db8ee7a8d6d189b5a66373060656e4baf71ac302f89c477da4c\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-ktqqj" podUID="c2fd1946-a3cb-453e-a1f3-458e14cb35ec"
Nov 28 11:24:37 crc kubenswrapper[4923]: I1128 11:24:37.049966 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-28rch" event={"ID":"b5fe44dd-beba-450d-a04a-59f3046ab0bb","Type":"ContainerStarted","Data":"711735680bd56aa1e2771bb06dc136807d9001e6db07ee432f9151d0c5c85936"}
Nov 28 11:24:37 crc kubenswrapper[4923]: E1128 11:24:37.064102 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:72236301580ff9080f7e311b832d7ba66666a9afeda51f969745229624ff26e4\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/swift-operator-controller-manager-d77b94747-t9tvv" podUID="f193a1a9-1c5d-4d16-a9c1-3a17530bed74"
Nov 28 11:24:37 crc kubenswrapper[4923]: E1128 11:24:37.064184 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/manila-operator@sha256:89910bc3ecceb7590d3207ac294eb7354de358cf39ef03c72323b26c598e50e6\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-v8vn8" podUID="ed2b1137-a903-4224-b706-304a2f416007"
Nov 28 11:24:37 crc kubenswrapper[4923]: E1128 11:24:37.064225 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/placement-operator@sha256:225958f250a1075b69439d776a13acc45c78695c21abda23600fb53ca1640423\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-28rch" podUID="b5fe44dd-beba-450d-a04a-59f3046ab0bb"
Nov 28 11:24:37 crc kubenswrapper[4923]: I1128 11:24:37.402956 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/ec4c9bb0-95fa-4840-8b48-de2b822bb788-cert\") pod \"infra-operator-controller-manager-57548d458d-2snwf\" (UID: \"ec4c9bb0-95fa-4840-8b48-de2b822bb788\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-2snwf"
Nov 28 11:24:37 crc kubenswrapper[4923]: E1128 11:24:37.403156 4923 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found
Nov 28 11:24:37 crc kubenswrapper[4923]: E1128 11:24:37.403370 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ec4c9bb0-95fa-4840-8b48-de2b822bb788-cert podName:ec4c9bb0-95fa-4840-8b48-de2b822bb788 nodeName:}" failed. No retries permitted until 2025-11-28 11:24:41.403351942 +0000 UTC m=+960.532036152 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/ec4c9bb0-95fa-4840-8b48-de2b822bb788-cert") pod "infra-operator-controller-manager-57548d458d-2snwf" (UID: "ec4c9bb0-95fa-4840-8b48-de2b822bb788") : secret "infra-operator-webhook-server-cert" not found
Nov 28 11:24:38 crc kubenswrapper[4923]: I1128 11:24:38.010888 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/4c92eac3-127a-4b96-adf3-e3e52ba9015d-cert\") pod \"openstack-baremetal-operator-controller-manager-5fcdb54b6bz5nqx\" (UID: \"4c92eac3-127a-4b96-adf3-e3e52ba9015d\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bz5nqx"
Nov 28 11:24:38 crc kubenswrapper[4923]: E1128 11:24:38.011137 4923 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found
Nov 28 11:24:38 crc kubenswrapper[4923]: E1128 11:24:38.011230 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4c92eac3-127a-4b96-adf3-e3e52ba9015d-cert podName:4c92eac3-127a-4b96-adf3-e3e52ba9015d nodeName:}" failed. No retries permitted until 2025-11-28 11:24:42.011194548 +0000 UTC m=+961.139878748 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/4c92eac3-127a-4b96-adf3-e3e52ba9015d-cert") pod "openstack-baremetal-operator-controller-manager-5fcdb54b6bz5nqx" (UID: "4c92eac3-127a-4b96-adf3-e3e52ba9015d") : secret "openstack-baremetal-operator-webhook-server-cert" not found
Nov 28 11:24:38 crc kubenswrapper[4923]: I1128 11:24:38.068003 4923 generic.go:334] "Generic (PLEG): container finished" podID="6cf5f23b-f4d3-4c24-ac00-26588d4bb3ab" containerID="ffab78d48d82d6a4298e56437c47e1e016cb8ab65bab495f1a0239578c300a3f" exitCode=0
Nov 28 11:24:38 crc kubenswrapper[4923]: I1128 11:24:38.068119 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-n887x" event={"ID":"6cf5f23b-f4d3-4c24-ac00-26588d4bb3ab","Type":"ContainerDied","Data":"ffab78d48d82d6a4298e56437c47e1e016cb8ab65bab495f1a0239578c300a3f"}
Nov 28 11:24:38 crc kubenswrapper[4923]: E1128 11:24:38.071687 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-zl5xb" podUID="44d18a2d-97f3-4e4a-82bf-6de8634c7585"
Nov 28 11:24:38 crc kubenswrapper[4923]: E1128 11:24:38.071818 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/placement-operator@sha256:225958f250a1075b69439d776a13acc45c78695c21abda23600fb53ca1640423\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-28rch" podUID="b5fe44dd-beba-450d-a04a-59f3046ab0bb"
Nov 28 11:24:38 crc kubenswrapper[4923]: E1128 11:24:38.071899 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/octavia-operator@sha256:ddc8a82f05930db8ee7a8d6d189b5a66373060656e4baf71ac302f89c477da4c\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-ktqqj" podUID="c2fd1946-a3cb-453e-a1f3-458e14cb35ec"
Nov 28 11:24:38 crc kubenswrapper[4923]: E1128 11:24:38.071999 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/telemetry-operator@sha256:7d66757c0af67104f0389e851a7cc0daa44443ad202d157417bd86bbb57cc385\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-5vgzd" podUID="1745a114-0278-4e37-9f5a-34ccaa421f19"
Nov 28 11:24:38 crc kubenswrapper[4923]: E1128 11:24:38.072838 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:6bed55b172b9ee8ccc3952cbfc543d8bd44e2690f6db94348a754152fd78f4cf\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-646x4" podUID="4a5bf8f0-87dd-4e04-bd23-8379f541b020"
Nov 28 11:24:38 crc kubenswrapper[4923]: E1128 11:24:38.072913 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:210517b918e30df1c95fc7d961c8e57e9a9d1cc2b9fe7eb4dad2034dd53a90aa\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-2tvqk" podUID="f6fd5062-da63-4fb0-bb4d-80643cb85ca7"
Nov 28 11:24:38 crc kubenswrapper[4923]: I1128 11:24:38.622148 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/7d3c8ccd-8582-467e-9017-4f08eaac26ab-metrics-certs\") pod \"openstack-operator-controller-manager-6fbf799579-4qrnz\" (UID: \"7d3c8ccd-8582-467e-9017-4f08eaac26ab\") " pod="openstack-operators/openstack-operator-controller-manager-6fbf799579-4qrnz"
Nov 28 11:24:38 crc kubenswrapper[4923]: I1128 11:24:38.622238 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/7d3c8ccd-8582-467e-9017-4f08eaac26ab-webhook-certs\") pod \"openstack-operator-controller-manager-6fbf799579-4qrnz\" (UID: \"7d3c8ccd-8582-467e-9017-4f08eaac26ab\") " pod="openstack-operators/openstack-operator-controller-manager-6fbf799579-4qrnz"
Nov 28 11:24:38 crc kubenswrapper[4923]: E1128 11:24:38.622368 4923 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found
Nov 28 11:24:38 crc kubenswrapper[4923]: E1128 11:24:38.622370 4923 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found
Nov 28 11:24:38 crc kubenswrapper[4923]: E1128 11:24:38.622430 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/7d3c8ccd-8582-467e-9017-4f08eaac26ab-metrics-certs podName:7d3c8ccd-8582-467e-9017-4f08eaac26ab nodeName:}" failed. No retries permitted until 2025-11-28 11:24:42.622415439 +0000 UTC m=+961.751099649 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/7d3c8ccd-8582-467e-9017-4f08eaac26ab-metrics-certs") pod "openstack-operator-controller-manager-6fbf799579-4qrnz" (UID: "7d3c8ccd-8582-467e-9017-4f08eaac26ab") : secret "metrics-server-cert" not found
Nov 28 11:24:38 crc kubenswrapper[4923]: E1128 11:24:38.622444 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/7d3c8ccd-8582-467e-9017-4f08eaac26ab-webhook-certs podName:7d3c8ccd-8582-467e-9017-4f08eaac26ab nodeName:}" failed. No retries permitted until 2025-11-28 11:24:42.62243963 +0000 UTC m=+961.751123840 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/7d3c8ccd-8582-467e-9017-4f08eaac26ab-webhook-certs") pod "openstack-operator-controller-manager-6fbf799579-4qrnz" (UID: "7d3c8ccd-8582-467e-9017-4f08eaac26ab") : secret "webhook-server-cert" not found
Nov 28 11:24:41 crc kubenswrapper[4923]: I1128 11:24:41.476700 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/ec4c9bb0-95fa-4840-8b48-de2b822bb788-cert\") pod \"infra-operator-controller-manager-57548d458d-2snwf\" (UID: \"ec4c9bb0-95fa-4840-8b48-de2b822bb788\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-2snwf"
Nov 28 11:24:41 crc kubenswrapper[4923]: E1128 11:24:41.476899 4923 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found
Nov 28 11:24:41 crc kubenswrapper[4923]: E1128 11:24:41.477087 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ec4c9bb0-95fa-4840-8b48-de2b822bb788-cert podName:ec4c9bb0-95fa-4840-8b48-de2b822bb788 nodeName:}" failed. No retries permitted until 2025-11-28 11:24:49.477070235 +0000 UTC m=+968.605754445 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/ec4c9bb0-95fa-4840-8b48-de2b822bb788-cert") pod "infra-operator-controller-manager-57548d458d-2snwf" (UID: "ec4c9bb0-95fa-4840-8b48-de2b822bb788") : secret "infra-operator-webhook-server-cert" not found
Nov 28 11:24:42 crc kubenswrapper[4923]: I1128 11:24:42.109794 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/4c92eac3-127a-4b96-adf3-e3e52ba9015d-cert\") pod \"openstack-baremetal-operator-controller-manager-5fcdb54b6bz5nqx\" (UID: \"4c92eac3-127a-4b96-adf3-e3e52ba9015d\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bz5nqx"
Nov 28 11:24:42 crc kubenswrapper[4923]: E1128 11:24:42.109921 4923 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found
Nov 28 11:24:42 crc kubenswrapper[4923]: E1128 11:24:42.110213 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4c92eac3-127a-4b96-adf3-e3e52ba9015d-cert podName:4c92eac3-127a-4b96-adf3-e3e52ba9015d nodeName:}" failed. No retries permitted until 2025-11-28 11:24:50.110199293 +0000 UTC m=+969.238883503 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/4c92eac3-127a-4b96-adf3-e3e52ba9015d-cert") pod "openstack-baremetal-operator-controller-manager-5fcdb54b6bz5nqx" (UID: "4c92eac3-127a-4b96-adf3-e3e52ba9015d") : secret "openstack-baremetal-operator-webhook-server-cert" not found
Nov 28 11:24:42 crc kubenswrapper[4923]: I1128 11:24:42.718290 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/7d3c8ccd-8582-467e-9017-4f08eaac26ab-metrics-certs\") pod \"openstack-operator-controller-manager-6fbf799579-4qrnz\" (UID: \"7d3c8ccd-8582-467e-9017-4f08eaac26ab\") " pod="openstack-operators/openstack-operator-controller-manager-6fbf799579-4qrnz"
Nov 28 11:24:42 crc kubenswrapper[4923]: I1128 11:24:42.718429 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/7d3c8ccd-8582-467e-9017-4f08eaac26ab-webhook-certs\") pod \"openstack-operator-controller-manager-6fbf799579-4qrnz\" (UID: \"7d3c8ccd-8582-467e-9017-4f08eaac26ab\") " pod="openstack-operators/openstack-operator-controller-manager-6fbf799579-4qrnz"
Nov 28 11:24:42 crc kubenswrapper[4923]: E1128 11:24:42.718676 4923 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found
Nov 28 11:24:42 crc kubenswrapper[4923]: E1128 11:24:42.718751 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/7d3c8ccd-8582-467e-9017-4f08eaac26ab-webhook-certs podName:7d3c8ccd-8582-467e-9017-4f08eaac26ab nodeName:}" failed. No retries permitted until 2025-11-28 11:24:50.718726168 +0000 UTC m=+969.847410418 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/7d3c8ccd-8582-467e-9017-4f08eaac26ab-webhook-certs") pod "openstack-operator-controller-manager-6fbf799579-4qrnz" (UID: "7d3c8ccd-8582-467e-9017-4f08eaac26ab") : secret "webhook-server-cert" not found
Nov 28 11:24:42 crc kubenswrapper[4923]: E1128 11:24:42.718818 4923 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found
Nov 28 11:24:42 crc kubenswrapper[4923]: E1128 11:24:42.718963 4923 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/7d3c8ccd-8582-467e-9017-4f08eaac26ab-metrics-certs podName:7d3c8ccd-8582-467e-9017-4f08eaac26ab nodeName:}" failed. No retries permitted until 2025-11-28 11:24:50.718906573 +0000 UTC m=+969.847590833 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/7d3c8ccd-8582-467e-9017-4f08eaac26ab-metrics-certs") pod "openstack-operator-controller-manager-6fbf799579-4qrnz" (UID: "7d3c8ccd-8582-467e-9017-4f08eaac26ab") : secret "metrics-server-cert" not found
Nov 28 11:24:44 crc kubenswrapper[4923]: I1128 11:24:44.026169 4923 patch_prober.go:28] interesting pod/machine-config-daemon-bwdth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 11:24:44 crc kubenswrapper[4923]: I1128 11:24:44.026265 4923 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 11:24:44 crc kubenswrapper[4923]: I1128 11:24:44.026332 4923 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-bwdth"
Nov 28 11:24:44 crc kubenswrapper[4923]: I1128 11:24:44.027324 4923 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"1125a66670947f90cf2e295b500044f466e54c6f2bb9f5eb7e6841beb4d77d04"} pod="openshift-machine-config-operator/machine-config-daemon-bwdth" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 28 11:24:44 crc kubenswrapper[4923]: I1128 11:24:44.027435 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" containerName="machine-config-daemon" containerID="cri-o://1125a66670947f90cf2e295b500044f466e54c6f2bb9f5eb7e6841beb4d77d04" gracePeriod=600
Nov 28 11:24:48 crc kubenswrapper[4923]: I1128 11:24:48.161575 4923 generic.go:334] "Generic (PLEG): container finished" podID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" containerID="1125a66670947f90cf2e295b500044f466e54c6f2bb9f5eb7e6841beb4d77d04" exitCode=0
Nov 28 11:24:48 crc kubenswrapper[4923]: I1128 11:24:48.161853 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" event={"ID":"092566f7-fc7d-4897-a1f2-4ecedcd3058e","Type":"ContainerDied","Data":"1125a66670947f90cf2e295b500044f466e54c6f2bb9f5eb7e6841beb4d77d04"}
Nov 28 11:24:48 crc kubenswrapper[4923]: I1128 11:24:48.162273 4923 scope.go:117] "RemoveContainer" containerID="677ed572a7b0e83cdbaab7053a3f1b65f579449e7b5bb37190e07948114a0b10"
Nov 28 11:24:49 crc kubenswrapper[4923]: E1128 11:24:49.088492 4923 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/heat-operator@sha256:2ee37ff474bee3203447df4f326a9279a515e770573153338296dd074722c677"
Nov 28 11:24:49 crc kubenswrapper[4923]: E1128 11:24:49.088662 4923 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/heat-operator@sha256:2ee37ff474bee3203447df4f326a9279a515e770573153338296dd074722c677,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-nsl4q,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-operator-controller-manager-5b77f656f-jtpmz_openstack-operators(2ea0b3c3-a5d4-4b2f-81ef-a52573d37e06): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 28 11:24:49 crc kubenswrapper[4923]: I1128 11:24:49.528320 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/ec4c9bb0-95fa-4840-8b48-de2b822bb788-cert\") pod \"infra-operator-controller-manager-57548d458d-2snwf\" (UID: \"ec4c9bb0-95fa-4840-8b48-de2b822bb788\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-2snwf"
Nov 28 11:24:49 crc kubenswrapper[4923]: I1128 11:24:49.535570 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/ec4c9bb0-95fa-4840-8b48-de2b822bb788-cert\") pod \"infra-operator-controller-manager-57548d458d-2snwf\" (UID: \"ec4c9bb0-95fa-4840-8b48-de2b822bb788\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-2snwf"
Nov 28 11:24:49 crc kubenswrapper[4923]: I1128 11:24:49.564036 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-s4jvb"
Nov 28 11:24:49 crc kubenswrapper[4923]: I1128 11:24:49.573759 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-57548d458d-2snwf"
Nov 28 11:24:49 crc kubenswrapper[4923]: E1128 11:24:49.877834 4923 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/ironic-operator@sha256:d65dbfc956e9cf376f3c48fc3a0942cb7306b5164f898c40d1efca106df81db7"
Nov 28 11:24:49 crc kubenswrapper[4923]: E1128 11:24:49.878455 4923 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/ironic-operator@sha256:d65dbfc956e9cf376f3c48fc3a0942cb7306b5164f898c40d1efca106df81db7,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-hnghs,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ironic-operator-controller-manager-67cb4dc6d4-j72jx_openstack-operators(52b8b64e-5401-41ef-8d65-cc275cdaf832): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 28 11:24:50 crc kubenswrapper[4923]: I1128 11:24:50.134468 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/4c92eac3-127a-4b96-adf3-e3e52ba9015d-cert\") pod \"openstack-baremetal-operator-controller-manager-5fcdb54b6bz5nqx\" (UID: \"4c92eac3-127a-4b96-adf3-e3e52ba9015d\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bz5nqx"
Nov 28 11:24:50 crc kubenswrapper[4923]: I1128 11:24:50.149721 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/4c92eac3-127a-4b96-adf3-e3e52ba9015d-cert\") pod \"openstack-baremetal-operator-controller-manager-5fcdb54b6bz5nqx\" (UID: \"4c92eac3-127a-4b96-adf3-e3e52ba9015d\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bz5nqx"
Nov 28 11:24:50 crc kubenswrapper[4923]: I1128 11:24:50.152826 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-s2bsh"
Nov 28 11:24:50 crc kubenswrapper[4923]: I1128 11:24:50.161297 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bz5nqx"
Nov 28 11:24:50 crc kubenswrapper[4923]: I1128 11:24:50.745995 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/7d3c8ccd-8582-467e-9017-4f08eaac26ab-metrics-certs\") pod \"openstack-operator-controller-manager-6fbf799579-4qrnz\" (UID: \"7d3c8ccd-8582-467e-9017-4f08eaac26ab\") " pod="openstack-operators/openstack-operator-controller-manager-6fbf799579-4qrnz"
Nov 28 11:24:50 crc kubenswrapper[4923]: I1128 11:24:50.746166 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/7d3c8ccd-8582-467e-9017-4f08eaac26ab-webhook-certs\") pod \"openstack-operator-controller-manager-6fbf799579-4qrnz\" (UID: \"7d3c8ccd-8582-467e-9017-4f08eaac26ab\") " pod="openstack-operators/openstack-operator-controller-manager-6fbf799579-4qrnz"
Nov 28 11:24:50 crc kubenswrapper[4923]: I1128 11:24:50.749615 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/7d3c8ccd-8582-467e-9017-4f08eaac26ab-webhook-certs\") pod \"openstack-operator-controller-manager-6fbf799579-4qrnz\" (UID: \"7d3c8ccd-8582-467e-9017-4f08eaac26ab\") " pod="openstack-operators/openstack-operator-controller-manager-6fbf799579-4qrnz"
Nov 28 11:24:50 crc kubenswrapper[4923]: I1128 11:24:50.749657 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/7d3c8ccd-8582-467e-9017-4f08eaac26ab-metrics-certs\") pod \"openstack-operator-controller-manager-6fbf799579-4qrnz\" (UID: \"7d3c8ccd-8582-467e-9017-4f08eaac26ab\") " pod="openstack-operators/openstack-operator-controller-manager-6fbf799579-4qrnz"
Nov 28 11:24:50 crc kubenswrapper[4923]: I1128 11:24:50.939140 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-mtmmb"
Nov 28 11:24:50 crc kubenswrapper[4923]: I1128 11:24:50.948136 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-6fbf799579-4qrnz"
Nov 28 11:24:51 crc kubenswrapper[4923]: E1128 11:24:51.396173 4923 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/barbican-operator@sha256:3dbf9fd9dce75f1fb250ee4c4097ad77d2f34110b61d85e37abd9c472e022e6c"
Nov 28 11:24:51 crc kubenswrapper[4923]: E1128 11:24:51.396473 4923 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/barbican-operator@sha256:3dbf9fd9dce75f1fb250ee4c4097ad77d2f34110b61d85e37abd9c472e022e6c,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-xn2pm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod barbican-operator-controller-manager-7b64f4fb85-kbm2r_openstack-operators(eb007735-97dd-4d13-9b3d-28adefb557e1): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 28 11:24:52 crc kubenswrapper[4923]: E1128 11:24:52.037800 4923 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/ovn-operator@sha256:bbb543d2d67c73e5df5d6357c3251363eb34a99575c5bf10416edd45dbdae2f6"
Nov 28 11:24:52 crc kubenswrapper[4923]: E1128 11:24:52.038031 4923 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/ovn-operator@sha256:bbb543d2d67c73e5df5d6357c3251363eb34a99575c5bf10416edd45dbdae2f6,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-z6qmp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-operator-controller-manager-56897c768d-sv67d_openstack-operators(e00c03c3-987c-4a5a-9c6f-2d15cd86a639): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 28 11:24:56 crc kubenswrapper[4923]: E1128 11:24:56.639154 4923 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/mariadb-operator@sha256:888edf6f432e52eaa5fc3caeae616fe38a3302b006bbba0e38885b2beba9f0f2"
Nov 28 11:24:56 crc kubenswrapper[4923]: E1128 11:24:56.640269 4923 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/mariadb-operator@sha256:888edf6f432e52eaa5fc3caeae616fe38a3302b006bbba0e38885b2beba9f0f2,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-qvbvn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod mariadb-operator-controller-manager-66f4dd4bc7-qc4bb_openstack-operators(bd6c3e5b-2eb9-4f4b-8893-07aab7091fab): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 28 11:24:57 crc kubenswrapper[4923]: E1128 11:24:57.211837 4923 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/keystone-operator@sha256:25faa5b0e4801d4d3b01a28b877ed3188eee71f33ad66f3c2e86b7921758e711"
Nov 28 11:24:57 crc kubenswrapper[4923]: E1128 11:24:57.213307 4923 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/keystone-operator@sha256:25faa5b0e4801d4d3b01a28b877ed3188eee71f33ad66f3c2e86b7921758e711,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-ns6mc,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod keystone-operator-controller-manager-7b4567c7cf-sk9fr_openstack-operators(c434efb7-70cf-4c94-be0d-9635325d758c): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 28 11:25:08 crc kubenswrapper[4923]: E1128 11:25:08.957350 4923 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2"
Nov 28 11:25:08 crc kubenswrapper[4923]: E1128 11:25:08.958059 4923 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-4dsww,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-668c99d594-zl5xb_openstack-operators(44d18a2d-97f3-4e4a-82bf-6de8634c7585): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 28 11:25:08 crc kubenswrapper[4923]: E1128 11:25:08.959186 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-zl5xb" podUID="44d18a2d-97f3-4e4a-82bf-6de8634c7585"
Nov 28 11:25:09 crc kubenswrapper[4923]: I1128 11:25:09.569843 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-57548d458d-2snwf"]
Nov 28 11:25:09 crc kubenswrapper[4923]: I1128 11:25:09.681476 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bz5nqx"]
Nov 28 11:25:09 crc kubenswrapper[4923]: I1128 11:25:09.698754 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-6fbf799579-4qrnz"]
Nov 28 11:25:10 crc kubenswrapper[4923]: I1128 11:25:10.375651 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-955677c94-8ftq2" event={"ID":"e469dd36-fba4-4342-8fd6-ef847f821393","Type":"ContainerStarted","Data":"03472ceadfe5dd4dbd5a1bf9c00325344ab912bec3da41196040e1b726c852da"}
Nov 28 11:25:10 crc kubenswrapper[4923]: I1128 11:25:10.382499 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-vvgng" event={"ID":"f9c94487-8e74-4cc0-ac40-834c175a770f","Type":"ContainerStarted","Data":"21635ed30445f2f81bd905fdfdc4cc6af7dc54e93601cbbddd0bda10a0cce71d"}
Nov 28 11:25:10 crc kubenswrapper[4923]: I1128 11:25:10.384827 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bz5nqx" event={"ID":"4c92eac3-127a-4b96-adf3-e3e52ba9015d","Type":"ContainerStarted","Data":"37da2cfaf99e8cac42d3270b5e3371683af4ce45fa07a16b3df9799af7d5b33a"}
Nov 28 11:25:10 crc kubenswrapper[4923]: I1128 11:25:10.388360 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-57548d458d-2snwf" event={"ID":"ec4c9bb0-95fa-4840-8b48-de2b822bb788","Type":"ContainerStarted","Data":"170ce5ca5eebc0df281618410d1c208a54219ddb01eabdebb9eb6fb609f8d98b"}
Nov 28 11:25:10 crc kubenswrapper[4923]: I1128 11:25:10.410498 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-n887x" event={"ID":"6cf5f23b-f4d3-4c24-ac00-26588d4bb3ab","Type":"ContainerStarted","Data":"73a38c3b78ad535f4c04e218448feab9f07a2b73537ae7f6e6f7ddf11fbc4e26"}
Nov 28 11:25:10 crc kubenswrapper[4923]: I1128 11:25:10.423498 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-p9k2s" event={"ID":"09a1376c-00d7-4540-a905-078c297241cb","Type":"ContainerStarted","Data":"2ce3cbc8cceab9f17286f2e59a1af10520c8b77b602368e82025a1de54523bd2"}
Nov 28 11:25:10 crc kubenswrapper[4923]: I1128 11:25:10.437137 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-kmtzj" event={"ID":"2ef6846b-733b-4c63-8add-5c3251658a7e","Type":"ContainerStarted","Data":"edda06b94d1464359370740d112503e1e4425998fb8e0502c5f87332057667cf"}
Nov 28 11:25:10 crc kubenswrapper[4923]: I1128 11:25:10.439670 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-6fbf799579-4qrnz" event={"ID":"7d3c8ccd-8582-467e-9017-4f08eaac26ab","Type":"ContainerStarted","Data":"a21beeb0fe7a758ccb33abc4106bdb7301f9217eadd500d2d9d6204a085da935"}
Nov 28 11:25:10 crc kubenswrapper[4923]: I1128 11:25:10.441980 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-98w4g" event={"ID":"a813ab03-9734-4a76-aef0-62c7606c85d5","Type":"ContainerStarted","Data":"9280844c21efeb67c45949f28dc83bad8a9f0355ed5be74677a91b4fd0631cf9"}
Nov 28 11:25:10 crc kubenswrapper[4923]: I1128 11:25:10.444265 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" event={"ID":"092566f7-fc7d-4897-a1f2-4ecedcd3058e","Type":"ContainerStarted","Data":"e854d096d0336c4d9ad4dac3da4cdf01df8dfe8d9a2f05530bd236f4a045e2f0"}
Nov 28 11:25:10 crc kubenswrapper[4923]: I1128 11:25:10.451134 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-9z8n7" event={"ID":"5d041894-6ce7-401f-9b0b-5d5a9e31a68d","Type":"ContainerStarted","Data":"a806d299ac81b8700f1acf30245a9c16cfd8c44197c399d94320e775787944fe"}
Nov 28 11:25:10 crc kubenswrapper[4923]: I1128 11:25:10.461682 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-n887x" podStartSLOduration=12.008996739 podStartE2EDuration="38.461665344s" podCreationTimestamp="2025-11-28 11:24:32 +0000 UTC" firstStartedPulling="2025-11-28 11:24:35.946981337 +0000 UTC m=+955.075665547" lastFinishedPulling="2025-11-28 11:25:02.399649902 +0000 UTC m=+981.528334152" observedRunningTime="2025-11-28 11:25:10.446160887 +0000 UTC m=+989.574845097" watchObservedRunningTime="2025-11-28 11:25:10.461665344 +0000 UTC m=+989.590349554"
Nov 28 11:25:11 crc kubenswrapper[4923]: I1128 11:25:11.464724 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-28rch" event={"ID":"b5fe44dd-beba-450d-a04a-59f3046ab0bb","Type":"ContainerStarted","Data":"28d55dc76c597470bd5314d0c3cf2e5096e70b4ae74aa1da4068ee0a20f20d5e"}
Nov 28 11:25:11 crc kubenswrapper[4923]: I1128 11:25:11.476843 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-2tvqk" event={"ID":"f6fd5062-da63-4fb0-bb4d-80643cb85ca7","Type":"ContainerStarted","Data":"682e1d2f90de84eb65a180618cb946c7f9553b000a1485ade2b1404ff5a3ae34"}
Nov 28 11:25:11 crc kubenswrapper[4923]: I1128 11:25:11.481614 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-v8vn8" event={"ID":"ed2b1137-a903-4224-b706-304a2f416007","Type":"ContainerStarted","Data":"79028805613fcba06b0ad8f3d5a6e497554f20def6db3c43377b87a1682fabf7"}
Nov 28 11:25:11 crc kubenswrapper[4923]: I1128 11:25:11.489658 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-5vgzd" event={"ID":"1745a114-0278-4e37-9f5a-34ccaa421f19","Type":"ContainerStarted","Data":"3ca3ab3efaa7105fa506bcd1b92db8e8b332c25e007a9fda11b2db9b5fa8f95e"}
Nov 28 11:25:12 crc kubenswrapper[4923]: I1128 11:25:12.521868 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-d77b94747-t9tvv" event={"ID":"f193a1a9-1c5d-4d16-a9c1-3a17530bed74","Type":"ContainerStarted","Data":"34447e66b3ab82adca11489a0fc8697c29702a0728f94ec327f7e5287546ab2c"}
Nov 28 11:25:12 crc kubenswrapper[4923]: I1128 11:25:12.527724 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-ktqqj" event={"ID":"c2fd1946-a3cb-453e-a1f3-458e14cb35ec","Type":"ContainerStarted","Data":"dd6a4c7dccb8c4441fc9d77f05315b74349200a8251345eca47d06aa7556204c"}
Nov 28 11:25:12 crc kubenswrapper[4923]: I1128 11:25:12.533292 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-6fbf799579-4qrnz" event={"ID":"7d3c8ccd-8582-467e-9017-4f08eaac26ab","Type":"ContainerStarted","Data":"d54fa902af24213e87808d785eccbaba47b57b46533f563e19448d94875e4260"}
Nov 28 11:25:12 crc kubenswrapper[4923]: I1128 11:25:12.533595 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-6fbf799579-4qrnz"
Nov 28 11:25:12 crc kubenswrapper[4923]: I1128 11:25:12.541800 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-646x4" event={"ID":"4a5bf8f0-87dd-4e04-bd23-8379f541b020","Type":"ContainerStarted","Data":"7ff1d605e4cc618b5fb997da37efbb01ef6f6f0da0244e1eaa84b6da0d744665"}
Nov 28 11:25:12 crc kubenswrapper[4923]: I1128 11:25:12.562751 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-manager-6fbf799579-4qrnz" podStartSLOduration=38.562735323 podStartE2EDuration="38.562735323s" podCreationTimestamp="2025-11-28 11:24:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:25:12.559943394 +0000 UTC m=+991.688627604" watchObservedRunningTime="2025-11-28 11:25:12.562735323 +0000 UTC m=+991.691419533"
Nov 28 11:25:13 crc kubenswrapper[4923]: I1128 11:25:13.266196 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-n887x"
Nov 28 11:25:13 crc kubenswrapper[4923]: I1128 11:25:13.266407 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-n887x"
Nov 28 11:25:14 crc kubenswrapper[4923]: I1128 11:25:14.314878 4923 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-n887x" podUID="6cf5f23b-f4d3-4c24-ac00-26588d4bb3ab" containerName="registry-server" probeResult="failure" output=<
Nov 28 11:25:14 crc kubenswrapper[4923]: timeout: failed to connect service ":50051" within 1s
Nov 28 11:25:14 crc kubenswrapper[4923]: >
Nov 28 11:25:15 crc kubenswrapper[4923]: E1128 11:25:15.649456 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-kbm2r" podUID="eb007735-97dd-4d13-9b3d-28adefb557e1"
Nov 28 11:25:15 crc kubenswrapper[4923]: E1128 11:25:15.916997 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-jtpmz" podUID="2ea0b3c3-a5d4-4b2f-81ef-a52573d37e06"
Nov 28 11:25:15 crc kubenswrapper[4923]: E1128 11:25:15.980611 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-sv67d" podUID="e00c03c3-987c-4a5a-9c6f-2d15cd86a639"
Nov 28 11:25:16 crc kubenswrapper[4923]: E1128 11:25:16.043943 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-sk9fr" podUID="c434efb7-70cf-4c94-be0d-9635325d758c"
Nov 28 11:25:16 crc kubenswrapper[4923]: E1128 11:25:16.083714 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-j72jx" podUID="52b8b64e-5401-41ef-8d65-cc275cdaf832"
Nov 28 11:25:16 crc kubenswrapper[4923]: E1128 11:25:16.094387 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-qc4bb" podUID="bd6c3e5b-2eb9-4f4b-8893-07aab7091fab"
Nov 28 11:25:16 crc kubenswrapper[4923]: I1128 11:25:16.590548 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-sk9fr" event={"ID":"c434efb7-70cf-4c94-be0d-9635325d758c","Type":"ContainerStarted","Data":"889737d1e8aabc3f39830fd28802c41223cb7e78a3169cb7ae1598812b39fdc2"}
Nov 28 11:25:16 crc kubenswrapper[4923]: I1128 11:25:16.604224 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-57548d458d-2snwf" event={"ID":"ec4c9bb0-95fa-4840-8b48-de2b822bb788","Type":"ContainerStarted","Data":"6b0ea7d5d44264d463a76cd90663437860fcdf4bf482290ce85d2458f02e2578"}
Nov 28 11:25:16 crc kubenswrapper[4923]: I1128 11:25:16.604277 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-57548d458d-2snwf"
event={"ID":"ec4c9bb0-95fa-4840-8b48-de2b822bb788","Type":"ContainerStarted","Data":"27d33059e4cc4f3dcc2cc5071847b5ef31f8c89c3485630b4dd7e37c87d0fe2e"} Nov 28 11:25:16 crc kubenswrapper[4923]: I1128 11:25:16.604923 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-57548d458d-2snwf" Nov 28 11:25:16 crc kubenswrapper[4923]: I1128 11:25:16.614743 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-j72jx" event={"ID":"52b8b64e-5401-41ef-8d65-cc275cdaf832","Type":"ContainerStarted","Data":"fb04bc32f163480f925421cb640235650ef26dbfe83a519ffb64b87c1c04396b"} Nov 28 11:25:16 crc kubenswrapper[4923]: I1128 11:25:16.621792 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bz5nqx" event={"ID":"4c92eac3-127a-4b96-adf3-e3e52ba9015d","Type":"ContainerStarted","Data":"68b8e631fa2515c086381ec1161f0583f03863f06d9ed325e2b82e46637a98f3"} Nov 28 11:25:16 crc kubenswrapper[4923]: I1128 11:25:16.621848 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bz5nqx" event={"ID":"4c92eac3-127a-4b96-adf3-e3e52ba9015d","Type":"ContainerStarted","Data":"137e8c3b51bcbcb0046305ecc423a03edf02ce236fc92420f9d5b7a72f603ead"} Nov 28 11:25:16 crc kubenswrapper[4923]: I1128 11:25:16.623965 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bz5nqx" Nov 28 11:25:16 crc kubenswrapper[4923]: I1128 11:25:16.633446 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-v8vn8" event={"ID":"ed2b1137-a903-4224-b706-304a2f416007","Type":"ContainerStarted","Data":"d8bef02bd04930f80d0308dad24cd40112928733c06fe9afed4563c148f59e1a"} Nov 28 11:25:16 crc kubenswrapper[4923]: I1128 11:25:16.634261 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-v8vn8" Nov 28 11:25:16 crc kubenswrapper[4923]: I1128 11:25:16.640868 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-v8vn8" Nov 28 11:25:16 crc kubenswrapper[4923]: I1128 11:25:16.657468 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-98w4g" event={"ID":"a813ab03-9734-4a76-aef0-62c7606c85d5","Type":"ContainerStarted","Data":"f826ec115145d8014ee1cf70d097a58d2ddd97d3e86581f61cdfaa00623d8ad8"} Nov 28 11:25:16 crc kubenswrapper[4923]: I1128 11:25:16.658567 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-98w4g" Nov 28 11:25:16 crc kubenswrapper[4923]: I1128 11:25:16.666620 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-controller-manager-57548d458d-2snwf" podStartSLOduration=38.215779267 podStartE2EDuration="43.666602085s" podCreationTimestamp="2025-11-28 11:24:33 +0000 UTC" firstStartedPulling="2025-11-28 11:25:09.93916765 +0000 UTC m=+989.067851870" lastFinishedPulling="2025-11-28 11:25:15.389990478 +0000 UTC m=+994.518674688" observedRunningTime="2025-11-28 11:25:16.650487181 +0000 UTC m=+995.779171381" 
watchObservedRunningTime="2025-11-28 11:25:16.666602085 +0000 UTC m=+995.795286295" Nov 28 11:25:16 crc kubenswrapper[4923]: I1128 11:25:16.667252 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-qc4bb" event={"ID":"bd6c3e5b-2eb9-4f4b-8893-07aab7091fab","Type":"ContainerStarted","Data":"bdd85922786a820d48674451e54a2f3aeaacc7a4303d8ada30f1c26ccff90cbe"} Nov 28 11:25:16 crc kubenswrapper[4923]: I1128 11:25:16.667348 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-98w4g" Nov 28 11:25:16 crc kubenswrapper[4923]: I1128 11:25:16.685600 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-d77b94747-t9tvv" event={"ID":"f193a1a9-1c5d-4d16-a9c1-3a17530bed74","Type":"ContainerStarted","Data":"e47fea516014b7e1805e5704e34a60914ce8016542e3d3d0ddbb25119a0cd4a2"} Nov 28 11:25:16 crc kubenswrapper[4923]: I1128 11:25:16.686244 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-d77b94747-t9tvv" Nov 28 11:25:16 crc kubenswrapper[4923]: I1128 11:25:16.692778 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-d77b94747-t9tvv" Nov 28 11:25:16 crc kubenswrapper[4923]: I1128 11:25:16.697742 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-jtpmz" event={"ID":"2ea0b3c3-a5d4-4b2f-81ef-a52573d37e06","Type":"ContainerStarted","Data":"5cb7d66542131b2addb7f83c3c18bc9ad7193d4b22744fc30a1c3364f71e1f9f"} Nov 28 11:25:16 crc kubenswrapper[4923]: I1128 11:25:16.709763 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-vvgng" event={"ID":"f9c94487-8e74-4cc0-ac40-834c175a770f","Type":"ContainerStarted","Data":"0e68cdbc698f533f6f2401c2f50d99878cad297280333650f74b6cb72a5d66c6"} Nov 28 11:25:16 crc kubenswrapper[4923]: I1128 11:25:16.710596 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-vvgng" Nov 28 11:25:16 crc kubenswrapper[4923]: I1128 11:25:16.720134 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-vvgng" Nov 28 11:25:16 crc kubenswrapper[4923]: I1128 11:25:16.726632 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-sv67d" event={"ID":"e00c03c3-987c-4a5a-9c6f-2d15cd86a639","Type":"ContainerStarted","Data":"60a1281cb4b20599955214075c9ef6974849bdd837f4d9017a1633eb1c42fe7d"} Nov 28 11:25:16 crc kubenswrapper[4923]: I1128 11:25:16.742052 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-955677c94-8ftq2" event={"ID":"e469dd36-fba4-4342-8fd6-ef847f821393","Type":"ContainerStarted","Data":"4c8f3cbe76b56b730a23269849bd2b6acd76474c4f1c030d52fb61851c1794ec"} Nov 28 11:25:16 crc kubenswrapper[4923]: I1128 11:25:16.742817 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-955677c94-8ftq2" Nov 28 11:25:16 crc kubenswrapper[4923]: I1128 11:25:16.748508 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-9z8n7" event={"ID":"5d041894-6ce7-401f-9b0b-5d5a9e31a68d","Type":"ContainerStarted","Data":"cb5e13cc94ec2cad6fe4c8af38e5bd765348a0cd93e6f1f555e63e45d3707ae5"} Nov 28 11:25:16 crc kubenswrapper[4923]: I1128 11:25:16.749207 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-9z8n7" Nov 28 11:25:16 crc kubenswrapper[4923]: I1128 11:25:16.761222 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-955677c94-8ftq2" Nov 28 11:25:16 crc kubenswrapper[4923]: I1128 11:25:16.761359 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-9z8n7" Nov 28 11:25:16 crc kubenswrapper[4923]: I1128 11:25:16.765782 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bz5nqx" podStartSLOduration=38.361095694 podStartE2EDuration="43.765767564s" podCreationTimestamp="2025-11-28 11:24:33 +0000 UTC" firstStartedPulling="2025-11-28 11:25:09.967197388 +0000 UTC m=+989.095881598" lastFinishedPulling="2025-11-28 11:25:15.371869248 +0000 UTC m=+994.500553468" observedRunningTime="2025-11-28 11:25:16.761548006 +0000 UTC m=+995.890232216" watchObservedRunningTime="2025-11-28 11:25:16.765767564 +0000 UTC m=+995.894451794" Nov 28 11:25:16 crc kubenswrapper[4923]: I1128 11:25:16.775004 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-kbm2r" event={"ID":"eb007735-97dd-4d13-9b3d-28adefb557e1","Type":"ContainerStarted","Data":"66fb1f79c97b491e0216a9ad30bf9c3bbbf28bdfb31c15c2cab24bd34f2a5b5b"} Nov 28 11:25:16 crc kubenswrapper[4923]: I1128 11:25:16.787416 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-v8vn8" podStartSLOduration=4.118179647 podStartE2EDuration="43.787401313s" podCreationTimestamp="2025-11-28 11:24:33 +0000 UTC" firstStartedPulling="2025-11-28 11:24:35.863851888 +0000 UTC m=+954.992536098" lastFinishedPulling="2025-11-28 11:25:15.533073554 +0000 UTC m=+994.661757764" observedRunningTime="2025-11-28 11:25:16.787143606 +0000 UTC m=+995.915827816" watchObservedRunningTime="2025-11-28 11:25:16.787401313 +0000 UTC m=+995.916085513" Nov 28 11:25:16 crc kubenswrapper[4923]: I1128 11:25:16.919105 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-controller-manager-d77b94747-t9tvv" podStartSLOduration=4.012111731 podStartE2EDuration="43.919086139s" podCreationTimestamp="2025-11-28 11:24:33 +0000 UTC" firstStartedPulling="2025-11-28 11:24:35.849940196 +0000 UTC m=+954.978624406" lastFinishedPulling="2025-11-28 11:25:15.756914604 +0000 UTC m=+994.885598814" observedRunningTime="2025-11-28 11:25:16.881894333 +0000 UTC m=+996.010578543" watchObservedRunningTime="2025-11-28 11:25:16.919086139 +0000 UTC m=+996.047770349" Nov 28 11:25:16 crc kubenswrapper[4923]: I1128 11:25:16.920561 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-98w4g" podStartSLOduration=3.831290723 podStartE2EDuration="43.920556851s" podCreationTimestamp="2025-11-28 11:24:33 +0000 UTC" 
firstStartedPulling="2025-11-28 11:24:35.512233022 +0000 UTC m=+954.640917232" lastFinishedPulling="2025-11-28 11:25:15.60149915 +0000 UTC m=+994.730183360" observedRunningTime="2025-11-28 11:25:16.91841903 +0000 UTC m=+996.047103240" watchObservedRunningTime="2025-11-28 11:25:16.920556851 +0000 UTC m=+996.049241061" Nov 28 11:25:17 crc kubenswrapper[4923]: I1128 11:25:17.019620 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-vvgng" podStartSLOduration=3.945122946 podStartE2EDuration="44.019598268s" podCreationTimestamp="2025-11-28 11:24:33 +0000 UTC" firstStartedPulling="2025-11-28 11:24:35.653445606 +0000 UTC m=+954.782129816" lastFinishedPulling="2025-11-28 11:25:15.727920938 +0000 UTC m=+994.856605138" observedRunningTime="2025-11-28 11:25:17.017605542 +0000 UTC m=+996.146289752" watchObservedRunningTime="2025-11-28 11:25:17.019598268 +0000 UTC m=+996.148282468" Nov 28 11:25:17 crc kubenswrapper[4923]: I1128 11:25:17.021080 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/designate-operator-controller-manager-955677c94-8ftq2" podStartSLOduration=4.049748541 podStartE2EDuration="44.021075899s" podCreationTimestamp="2025-11-28 11:24:33 +0000 UTC" firstStartedPulling="2025-11-28 11:24:35.548060901 +0000 UTC m=+954.676745111" lastFinishedPulling="2025-11-28 11:25:15.519388259 +0000 UTC m=+994.648072469" observedRunningTime="2025-11-28 11:25:16.991413015 +0000 UTC m=+996.120097225" watchObservedRunningTime="2025-11-28 11:25:17.021075899 +0000 UTC m=+996.149760109" Nov 28 11:25:17 crc kubenswrapper[4923]: I1128 11:25:17.161312 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-9z8n7" podStartSLOduration=4.111089306 podStartE2EDuration="44.161293575s" podCreationTimestamp="2025-11-28 11:24:33 +0000 UTC" firstStartedPulling="2025-11-28 11:24:35.665780413 +0000 UTC m=+954.794464623" lastFinishedPulling="2025-11-28 11:25:15.715984682 +0000 UTC m=+994.844668892" observedRunningTime="2025-11-28 11:25:17.125390235 +0000 UTC m=+996.254074445" watchObservedRunningTime="2025-11-28 11:25:17.161293575 +0000 UTC m=+996.289977785" Nov 28 11:25:17 crc kubenswrapper[4923]: I1128 11:25:17.791565 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-2tvqk" event={"ID":"f6fd5062-da63-4fb0-bb4d-80643cb85ca7","Type":"ContainerStarted","Data":"509c19c81076e17e51caf00a0358be5824e5b36bc3dea66cb605b3300c23ce28"} Nov 28 11:25:17 crc kubenswrapper[4923]: I1128 11:25:17.793062 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-2tvqk" Nov 28 11:25:17 crc kubenswrapper[4923]: I1128 11:25:17.802256 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-2tvqk" Nov 28 11:25:17 crc kubenswrapper[4923]: I1128 11:25:17.805389 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-p9k2s" event={"ID":"09a1376c-00d7-4540-a905-078c297241cb","Type":"ContainerStarted","Data":"de6628c247ef45466a15853614db52ec5f02d7daaec42220ae2168b56aeb118e"} Nov 28 11:25:17 crc kubenswrapper[4923]: I1128 11:25:17.806370 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-p9k2s" Nov 28 11:25:17 crc kubenswrapper[4923]: I1128 11:25:17.809191 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-p9k2s" Nov 28 11:25:17 crc kubenswrapper[4923]: I1128 11:25:17.813112 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-5vgzd" event={"ID":"1745a114-0278-4e37-9f5a-34ccaa421f19","Type":"ContainerStarted","Data":"78cb9fbfb7ea919dd541c29f59ea89cf02d7d25d76e0bb77ce7fc1bbbeedf1a4"} Nov 28 11:25:17 crc kubenswrapper[4923]: I1128 11:25:17.813498 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-5vgzd" Nov 28 11:25:17 crc kubenswrapper[4923]: I1128 11:25:17.820824 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-5vgzd" Nov 28 11:25:17 crc kubenswrapper[4923]: I1128 11:25:17.825559 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-2tvqk" podStartSLOduration=5.208065319 podStartE2EDuration="44.825533519s" podCreationTimestamp="2025-11-28 11:24:33 +0000 UTC" firstStartedPulling="2025-11-28 11:24:36.077740037 +0000 UTC m=+955.206424247" lastFinishedPulling="2025-11-28 11:25:15.695208237 +0000 UTC m=+994.823892447" observedRunningTime="2025-11-28 11:25:17.821247968 +0000 UTC m=+996.949932178" watchObservedRunningTime="2025-11-28 11:25:17.825533519 +0000 UTC m=+996.954217769" Nov 28 11:25:17 crc kubenswrapper[4923]: I1128 11:25:17.826856 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-kmtzj" event={"ID":"2ef6846b-733b-4c63-8add-5c3251658a7e","Type":"ContainerStarted","Data":"668e913ee4e370ead7f7d0fbbc5f333901c2bb999aad00b9af0637fcb8a375ff"} Nov 28 11:25:17 crc kubenswrapper[4923]: I1128 11:25:17.828592 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-kmtzj" Nov 28 11:25:17 crc kubenswrapper[4923]: I1128 11:25:17.829872 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-kmtzj" Nov 28 11:25:17 crc kubenswrapper[4923]: I1128 11:25:17.833163 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-ktqqj" event={"ID":"c2fd1946-a3cb-453e-a1f3-458e14cb35ec","Type":"ContainerStarted","Data":"6504777e789ae12f75fae75547179443762be140cc8f89a4213bdcd426c8bdf1"} Nov 28 11:25:17 crc kubenswrapper[4923]: I1128 11:25:17.833992 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-ktqqj" Nov 28 11:25:17 crc kubenswrapper[4923]: I1128 11:25:17.838617 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-ktqqj" Nov 28 11:25:17 crc kubenswrapper[4923]: I1128 11:25:17.851182 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-28rch" 
event={"ID":"b5fe44dd-beba-450d-a04a-59f3046ab0bb","Type":"ContainerStarted","Data":"c10573e6c3e82bc96bf6f847947ef8508355c2f7744e65d6e85f2940343b0e8a"} Nov 28 11:25:17 crc kubenswrapper[4923]: I1128 11:25:17.852728 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-28rch" Nov 28 11:25:17 crc kubenswrapper[4923]: I1128 11:25:17.858102 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-28rch" Nov 28 11:25:17 crc kubenswrapper[4923]: I1128 11:25:17.861622 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-646x4" event={"ID":"4a5bf8f0-87dd-4e04-bd23-8379f541b020","Type":"ContainerStarted","Data":"259d170a5a34fc8fd253c13d771248a586e2ad09c49efb3038acace8d2218cdd"} Nov 28 11:25:17 crc kubenswrapper[4923]: I1128 11:25:17.862100 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-646x4" Nov 28 11:25:17 crc kubenswrapper[4923]: I1128 11:25:17.879291 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-646x4" Nov 28 11:25:17 crc kubenswrapper[4923]: I1128 11:25:17.884023 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-5vgzd" podStartSLOduration=4.954009549 podStartE2EDuration="44.883990504s" podCreationTimestamp="2025-11-28 11:24:33 +0000 UTC" firstStartedPulling="2025-11-28 11:24:36.051611762 +0000 UTC m=+955.180295972" lastFinishedPulling="2025-11-28 11:25:15.981592717 +0000 UTC m=+995.110276927" observedRunningTime="2025-11-28 11:25:17.869060734 +0000 UTC m=+996.997744954" watchObservedRunningTime="2025-11-28 11:25:17.883990504 +0000 UTC m=+997.012674714" Nov 28 11:25:17 crc kubenswrapper[4923]: I1128 11:25:17.919821 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-p9k2s" podStartSLOduration=4.698540269 podStartE2EDuration="44.919799622s" podCreationTimestamp="2025-11-28 11:24:33 +0000 UTC" firstStartedPulling="2025-11-28 11:24:35.675586379 +0000 UTC m=+954.804270589" lastFinishedPulling="2025-11-28 11:25:15.896845732 +0000 UTC m=+995.025529942" observedRunningTime="2025-11-28 11:25:17.893972015 +0000 UTC m=+997.022656225" watchObservedRunningTime="2025-11-28 11:25:17.919799622 +0000 UTC m=+997.048483832" Nov 28 11:25:17 crc kubenswrapper[4923]: I1128 11:25:17.941997 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-kmtzj" podStartSLOduration=4.023887383 podStartE2EDuration="44.941970046s" podCreationTimestamp="2025-11-28 11:24:33 +0000 UTC" firstStartedPulling="2025-11-28 11:24:34.961137583 +0000 UTC m=+954.089821793" lastFinishedPulling="2025-11-28 11:25:15.879220246 +0000 UTC m=+995.007904456" observedRunningTime="2025-11-28 11:25:17.918402232 +0000 UTC m=+997.047086442" watchObservedRunningTime="2025-11-28 11:25:17.941970046 +0000 UTC m=+997.070654316" Nov 28 11:25:18 crc kubenswrapper[4923]: I1128 11:25:18.004384 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-28rch" 
podStartSLOduration=5.094482482 podStartE2EDuration="45.004367552s" podCreationTimestamp="2025-11-28 11:24:33 +0000 UTC" firstStartedPulling="2025-11-28 11:24:36.080234087 +0000 UTC m=+955.208918297" lastFinishedPulling="2025-11-28 11:25:15.990119157 +0000 UTC m=+995.118803367" observedRunningTime="2025-11-28 11:25:17.952594365 +0000 UTC m=+997.081278565" watchObservedRunningTime="2025-11-28 11:25:18.004367552 +0000 UTC m=+997.133051762" Nov 28 11:25:18 crc kubenswrapper[4923]: I1128 11:25:18.038179 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-646x4" podStartSLOduration=5.265724221 podStartE2EDuration="45.038151912s" podCreationTimestamp="2025-11-28 11:24:33 +0000 UTC" firstStartedPulling="2025-11-28 11:24:36.054530204 +0000 UTC m=+955.183214404" lastFinishedPulling="2025-11-28 11:25:15.826957885 +0000 UTC m=+994.955642095" observedRunningTime="2025-11-28 11:25:18.001320506 +0000 UTC m=+997.130004716" watchObservedRunningTime="2025-11-28 11:25:18.038151912 +0000 UTC m=+997.166836122" Nov 28 11:25:18 crc kubenswrapper[4923]: I1128 11:25:18.051206 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-ktqqj" podStartSLOduration=5.212469182 podStartE2EDuration="45.051185549s" podCreationTimestamp="2025-11-28 11:24:33 +0000 UTC" firstStartedPulling="2025-11-28 11:24:36.023011187 +0000 UTC m=+955.151695397" lastFinishedPulling="2025-11-28 11:25:15.861727554 +0000 UTC m=+994.990411764" observedRunningTime="2025-11-28 11:25:18.03878632 +0000 UTC m=+997.167470530" watchObservedRunningTime="2025-11-28 11:25:18.051185549 +0000 UTC m=+997.179869759" Nov 28 11:25:18 crc kubenswrapper[4923]: I1128 11:25:18.877922 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-qc4bb" event={"ID":"bd6c3e5b-2eb9-4f4b-8893-07aab7091fab","Type":"ContainerStarted","Data":"d4d6a396eef8ea60cc22a207e9f659e40c9db38ad73f831a8983f4e1eff3500b"} Nov 28 11:25:18 crc kubenswrapper[4923]: I1128 11:25:18.878298 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-qc4bb" Nov 28 11:25:18 crc kubenswrapper[4923]: I1128 11:25:18.879725 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-j72jx" event={"ID":"52b8b64e-5401-41ef-8d65-cc275cdaf832","Type":"ContainerStarted","Data":"25d6174e349acacad80e1eb306ae68cc5dbe4c21e3727705b66949f2ab351058"} Nov 28 11:25:18 crc kubenswrapper[4923]: I1128 11:25:18.880001 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-j72jx" Nov 28 11:25:18 crc kubenswrapper[4923]: I1128 11:25:18.881104 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-jtpmz" event={"ID":"2ea0b3c3-a5d4-4b2f-81ef-a52573d37e06","Type":"ContainerStarted","Data":"2d772d5b2d4977edeebe37c63151c68caa25ecbca36cfa5df7fcfc51e74daceb"} Nov 28 11:25:18 crc kubenswrapper[4923]: I1128 11:25:18.881475 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-jtpmz" Nov 28 11:25:18 crc kubenswrapper[4923]: I1128 11:25:18.882944 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-sk9fr" event={"ID":"c434efb7-70cf-4c94-be0d-9635325d758c","Type":"ContainerStarted","Data":"1a873ed55cf9a5f0cacedc4dba24c048f6e3cc8198ec7e79f7c6ea50066c5557"} Nov 28 11:25:18 crc kubenswrapper[4923]: I1128 11:25:18.883289 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-sk9fr" Nov 28 11:25:18 crc kubenswrapper[4923]: I1128 11:25:18.884899 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-sv67d" event={"ID":"e00c03c3-987c-4a5a-9c6f-2d15cd86a639","Type":"ContainerStarted","Data":"836e2294cade5261d794432ae7b9316754a1ef4747dcbbafa7aa22aa34fdb6d2"} Nov 28 11:25:18 crc kubenswrapper[4923]: I1128 11:25:18.885508 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-sv67d" Nov 28 11:25:18 crc kubenswrapper[4923]: I1128 11:25:18.888162 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-kbm2r" event={"ID":"eb007735-97dd-4d13-9b3d-28adefb557e1","Type":"ContainerStarted","Data":"25c187336c499edbf34166927e584b104712911075e397d570f9e48fb0650b24"} Nov 28 11:25:18 crc kubenswrapper[4923]: I1128 11:25:18.888206 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-kbm2r" Nov 28 11:25:18 crc kubenswrapper[4923]: I1128 11:25:18.904365 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-qc4bb" podStartSLOduration=4.072256884 podStartE2EDuration="45.904316838s" podCreationTimestamp="2025-11-28 11:24:33 +0000 UTC" firstStartedPulling="2025-11-28 11:24:35.653978681 +0000 UTC m=+954.782662891" lastFinishedPulling="2025-11-28 11:25:17.486038635 +0000 UTC m=+996.614722845" observedRunningTime="2025-11-28 11:25:18.901153979 +0000 UTC m=+998.029838199" watchObservedRunningTime="2025-11-28 11:25:18.904316838 +0000 UTC m=+998.033001058" Nov 28 11:25:18 crc kubenswrapper[4923]: I1128 11:25:18.928428 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-sk9fr" podStartSLOduration=4.170442838 podStartE2EDuration="45.928409496s" podCreationTimestamp="2025-11-28 11:24:33 +0000 UTC" firstStartedPulling="2025-11-28 11:24:35.697202678 +0000 UTC m=+954.825886888" lastFinishedPulling="2025-11-28 11:25:17.455169336 +0000 UTC m=+996.583853546" observedRunningTime="2025-11-28 11:25:18.925261018 +0000 UTC m=+998.053945228" watchObservedRunningTime="2025-11-28 11:25:18.928409496 +0000 UTC m=+998.057093706" Nov 28 11:25:18 crc kubenswrapper[4923]: I1128 11:25:18.946663 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-jtpmz" podStartSLOduration=3.843026753 podStartE2EDuration="45.946641989s" podCreationTimestamp="2025-11-28 11:24:33 +0000 UTC" firstStartedPulling="2025-11-28 11:24:35.559095261 +0000 UTC m=+954.687779471" lastFinishedPulling="2025-11-28 11:25:17.662710507 +0000 UTC m=+996.791394707" observedRunningTime="2025-11-28 11:25:18.943921783 +0000 UTC m=+998.072605993" watchObservedRunningTime="2025-11-28 11:25:18.946641989 +0000 UTC m=+998.075326199" Nov 28 11:25:18 crc 
kubenswrapper[4923]: I1128 11:25:18.965509 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-j72jx" podStartSLOduration=4.348826538 podStartE2EDuration="45.96548559s" podCreationTimestamp="2025-11-28 11:24:33 +0000 UTC" firstStartedPulling="2025-11-28 11:24:35.837791634 +0000 UTC m=+954.966475844" lastFinishedPulling="2025-11-28 11:25:17.454450686 +0000 UTC m=+996.583134896" observedRunningTime="2025-11-28 11:25:18.961083806 +0000 UTC m=+998.089768006" watchObservedRunningTime="2025-11-28 11:25:18.96548559 +0000 UTC m=+998.094169800" Nov 28 11:25:18 crc kubenswrapper[4923]: I1128 11:25:18.984393 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-kbm2r" podStartSLOduration=3.995763702 podStartE2EDuration="45.984374881s" podCreationTimestamp="2025-11-28 11:24:33 +0000 UTC" firstStartedPulling="2025-11-28 11:24:35.673256524 +0000 UTC m=+954.801940734" lastFinishedPulling="2025-11-28 11:25:17.661867703 +0000 UTC m=+996.790551913" observedRunningTime="2025-11-28 11:25:18.982619662 +0000 UTC m=+998.111303872" watchObservedRunningTime="2025-11-28 11:25:18.984374881 +0000 UTC m=+998.113059091" Nov 28 11:25:18 crc kubenswrapper[4923]: I1128 11:25:18.995270 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-sv67d" podStartSLOduration=4.387870406 podStartE2EDuration="45.995251597s" podCreationTimestamp="2025-11-28 11:24:33 +0000 UTC" firstStartedPulling="2025-11-28 11:24:35.847783505 +0000 UTC m=+954.976467715" lastFinishedPulling="2025-11-28 11:25:17.455164696 +0000 UTC m=+996.583848906" observedRunningTime="2025-11-28 11:25:18.993497108 +0000 UTC m=+998.122181318" watchObservedRunningTime="2025-11-28 11:25:18.995251597 +0000 UTC m=+998.123935807" Nov 28 11:25:20 crc kubenswrapper[4923]: I1128 11:25:20.172683 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bz5nqx" Nov 28 11:25:20 crc kubenswrapper[4923]: I1128 11:25:20.955121 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-6fbf799579-4qrnz" Nov 28 11:25:23 crc kubenswrapper[4923]: E1128 11:25:23.172233 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-zl5xb" podUID="44d18a2d-97f3-4e4a-82bf-6de8634c7585" Nov 28 11:25:23 crc kubenswrapper[4923]: I1128 11:25:23.340847 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-n887x" Nov 28 11:25:23 crc kubenswrapper[4923]: I1128 11:25:23.401757 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-n887x" Nov 28 11:25:23 crc kubenswrapper[4923]: I1128 11:25:23.583714 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-n887x"] Nov 28 11:25:23 crc kubenswrapper[4923]: I1128 11:25:23.768491 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="ready" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-jtpmz" Nov 28 11:25:23 crc kubenswrapper[4923]: I1128 11:25:23.882764 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-kbm2r" Nov 28 11:25:24 crc kubenswrapper[4923]: I1128 11:25:24.014682 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-sk9fr" Nov 28 11:25:24 crc kubenswrapper[4923]: I1128 11:25:24.024668 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-j72jx" Nov 28 11:25:24 crc kubenswrapper[4923]: I1128 11:25:24.118172 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-qc4bb" Nov 28 11:25:24 crc kubenswrapper[4923]: I1128 11:25:24.633573 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-sv67d" Nov 28 11:25:24 crc kubenswrapper[4923]: I1128 11:25:24.924964 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-n887x" podUID="6cf5f23b-f4d3-4c24-ac00-26588d4bb3ab" containerName="registry-server" containerID="cri-o://73a38c3b78ad535f4c04e218448feab9f07a2b73537ae7f6e6f7ddf11fbc4e26" gracePeriod=2 Nov 28 11:25:25 crc kubenswrapper[4923]: I1128 11:25:25.369572 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-n887x" Nov 28 11:25:25 crc kubenswrapper[4923]: I1128 11:25:25.464601 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rzt92\" (UniqueName: \"kubernetes.io/projected/6cf5f23b-f4d3-4c24-ac00-26588d4bb3ab-kube-api-access-rzt92\") pod \"6cf5f23b-f4d3-4c24-ac00-26588d4bb3ab\" (UID: \"6cf5f23b-f4d3-4c24-ac00-26588d4bb3ab\") " Nov 28 11:25:25 crc kubenswrapper[4923]: I1128 11:25:25.465112 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6cf5f23b-f4d3-4c24-ac00-26588d4bb3ab-utilities\") pod \"6cf5f23b-f4d3-4c24-ac00-26588d4bb3ab\" (UID: \"6cf5f23b-f4d3-4c24-ac00-26588d4bb3ab\") " Nov 28 11:25:25 crc kubenswrapper[4923]: I1128 11:25:25.465747 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6cf5f23b-f4d3-4c24-ac00-26588d4bb3ab-utilities" (OuterVolumeSpecName: "utilities") pod "6cf5f23b-f4d3-4c24-ac00-26588d4bb3ab" (UID: "6cf5f23b-f4d3-4c24-ac00-26588d4bb3ab"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:25:25 crc kubenswrapper[4923]: I1128 11:25:25.465820 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6cf5f23b-f4d3-4c24-ac00-26588d4bb3ab-catalog-content\") pod \"6cf5f23b-f4d3-4c24-ac00-26588d4bb3ab\" (UID: \"6cf5f23b-f4d3-4c24-ac00-26588d4bb3ab\") " Nov 28 11:25:25 crc kubenswrapper[4923]: I1128 11:25:25.466522 4923 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6cf5f23b-f4d3-4c24-ac00-26588d4bb3ab-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 11:25:25 crc kubenswrapper[4923]: I1128 11:25:25.473507 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6cf5f23b-f4d3-4c24-ac00-26588d4bb3ab-kube-api-access-rzt92" (OuterVolumeSpecName: "kube-api-access-rzt92") pod "6cf5f23b-f4d3-4c24-ac00-26588d4bb3ab" (UID: "6cf5f23b-f4d3-4c24-ac00-26588d4bb3ab"). InnerVolumeSpecName "kube-api-access-rzt92". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:25:25 crc kubenswrapper[4923]: I1128 11:25:25.507479 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6cf5f23b-f4d3-4c24-ac00-26588d4bb3ab-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6cf5f23b-f4d3-4c24-ac00-26588d4bb3ab" (UID: "6cf5f23b-f4d3-4c24-ac00-26588d4bb3ab"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:25:25 crc kubenswrapper[4923]: I1128 11:25:25.567564 4923 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6cf5f23b-f4d3-4c24-ac00-26588d4bb3ab-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 11:25:25 crc kubenswrapper[4923]: I1128 11:25:25.567600 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rzt92\" (UniqueName: \"kubernetes.io/projected/6cf5f23b-f4d3-4c24-ac00-26588d4bb3ab-kube-api-access-rzt92\") on node \"crc\" DevicePath \"\"" Nov 28 11:25:25 crc kubenswrapper[4923]: I1128 11:25:25.938457 4923 generic.go:334] "Generic (PLEG): container finished" podID="6cf5f23b-f4d3-4c24-ac00-26588d4bb3ab" containerID="73a38c3b78ad535f4c04e218448feab9f07a2b73537ae7f6e6f7ddf11fbc4e26" exitCode=0 Nov 28 11:25:25 crc kubenswrapper[4923]: I1128 11:25:25.938540 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-n887x" event={"ID":"6cf5f23b-f4d3-4c24-ac00-26588d4bb3ab","Type":"ContainerDied","Data":"73a38c3b78ad535f4c04e218448feab9f07a2b73537ae7f6e6f7ddf11fbc4e26"} Nov 28 11:25:25 crc kubenswrapper[4923]: I1128 11:25:25.938567 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-n887x" Nov 28 11:25:25 crc kubenswrapper[4923]: I1128 11:25:25.938621 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-n887x" event={"ID":"6cf5f23b-f4d3-4c24-ac00-26588d4bb3ab","Type":"ContainerDied","Data":"5c0d3c48d8e89c18967a9bc9f3d9abb9c9c3711474b085318b5e7ed5ab4bde5d"} Nov 28 11:25:25 crc kubenswrapper[4923]: I1128 11:25:25.938656 4923 scope.go:117] "RemoveContainer" containerID="73a38c3b78ad535f4c04e218448feab9f07a2b73537ae7f6e6f7ddf11fbc4e26" Nov 28 11:25:25 crc kubenswrapper[4923]: I1128 11:25:25.983642 4923 scope.go:117] "RemoveContainer" containerID="ffab78d48d82d6a4298e56437c47e1e016cb8ab65bab495f1a0239578c300a3f" Nov 28 11:25:25 crc kubenswrapper[4923]: I1128 11:25:25.987630 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-n887x"] Nov 28 11:25:25 crc kubenswrapper[4923]: I1128 11:25:25.994708 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-n887x"] Nov 28 11:25:26 crc kubenswrapper[4923]: I1128 11:25:26.014758 4923 scope.go:117] "RemoveContainer" containerID="0fbffee5c0cc9f70f29b58dd17a0d0b931279a175aa3107a205b61bfa1a8fb35" Nov 28 11:25:26 crc kubenswrapper[4923]: I1128 11:25:26.032256 4923 scope.go:117] "RemoveContainer" containerID="73a38c3b78ad535f4c04e218448feab9f07a2b73537ae7f6e6f7ddf11fbc4e26" Nov 28 11:25:26 crc kubenswrapper[4923]: E1128 11:25:26.032978 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"73a38c3b78ad535f4c04e218448feab9f07a2b73537ae7f6e6f7ddf11fbc4e26\": container with ID starting with 73a38c3b78ad535f4c04e218448feab9f07a2b73537ae7f6e6f7ddf11fbc4e26 not found: ID does not exist" containerID="73a38c3b78ad535f4c04e218448feab9f07a2b73537ae7f6e6f7ddf11fbc4e26" Nov 28 11:25:26 crc kubenswrapper[4923]: I1128 11:25:26.033040 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"73a38c3b78ad535f4c04e218448feab9f07a2b73537ae7f6e6f7ddf11fbc4e26"} err="failed to get container status \"73a38c3b78ad535f4c04e218448feab9f07a2b73537ae7f6e6f7ddf11fbc4e26\": rpc error: code = NotFound desc = could not find container \"73a38c3b78ad535f4c04e218448feab9f07a2b73537ae7f6e6f7ddf11fbc4e26\": container with ID starting with 73a38c3b78ad535f4c04e218448feab9f07a2b73537ae7f6e6f7ddf11fbc4e26 not found: ID does not exist" Nov 28 11:25:26 crc kubenswrapper[4923]: I1128 11:25:26.033072 4923 scope.go:117] "RemoveContainer" containerID="ffab78d48d82d6a4298e56437c47e1e016cb8ab65bab495f1a0239578c300a3f" Nov 28 11:25:26 crc kubenswrapper[4923]: E1128 11:25:26.034413 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ffab78d48d82d6a4298e56437c47e1e016cb8ab65bab495f1a0239578c300a3f\": container with ID starting with ffab78d48d82d6a4298e56437c47e1e016cb8ab65bab495f1a0239578c300a3f not found: ID does not exist" containerID="ffab78d48d82d6a4298e56437c47e1e016cb8ab65bab495f1a0239578c300a3f" Nov 28 11:25:26 crc kubenswrapper[4923]: I1128 11:25:26.034446 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ffab78d48d82d6a4298e56437c47e1e016cb8ab65bab495f1a0239578c300a3f"} err="failed to get container status \"ffab78d48d82d6a4298e56437c47e1e016cb8ab65bab495f1a0239578c300a3f\": rpc error: code = NotFound desc = could not find 
container \"ffab78d48d82d6a4298e56437c47e1e016cb8ab65bab495f1a0239578c300a3f\": container with ID starting with ffab78d48d82d6a4298e56437c47e1e016cb8ab65bab495f1a0239578c300a3f not found: ID does not exist" Nov 28 11:25:26 crc kubenswrapper[4923]: I1128 11:25:26.034460 4923 scope.go:117] "RemoveContainer" containerID="0fbffee5c0cc9f70f29b58dd17a0d0b931279a175aa3107a205b61bfa1a8fb35" Nov 28 11:25:26 crc kubenswrapper[4923]: E1128 11:25:26.034872 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0fbffee5c0cc9f70f29b58dd17a0d0b931279a175aa3107a205b61bfa1a8fb35\": container with ID starting with 0fbffee5c0cc9f70f29b58dd17a0d0b931279a175aa3107a205b61bfa1a8fb35 not found: ID does not exist" containerID="0fbffee5c0cc9f70f29b58dd17a0d0b931279a175aa3107a205b61bfa1a8fb35" Nov 28 11:25:26 crc kubenswrapper[4923]: I1128 11:25:26.034893 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0fbffee5c0cc9f70f29b58dd17a0d0b931279a175aa3107a205b61bfa1a8fb35"} err="failed to get container status \"0fbffee5c0cc9f70f29b58dd17a0d0b931279a175aa3107a205b61bfa1a8fb35\": rpc error: code = NotFound desc = could not find container \"0fbffee5c0cc9f70f29b58dd17a0d0b931279a175aa3107a205b61bfa1a8fb35\": container with ID starting with 0fbffee5c0cc9f70f29b58dd17a0d0b931279a175aa3107a205b61bfa1a8fb35 not found: ID does not exist" Nov 28 11:25:27 crc kubenswrapper[4923]: I1128 11:25:27.199521 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6cf5f23b-f4d3-4c24-ac00-26588d4bb3ab" path="/var/lib/kubelet/pods/6cf5f23b-f4d3-4c24-ac00-26588d4bb3ab/volumes" Nov 28 11:25:29 crc kubenswrapper[4923]: I1128 11:25:29.582766 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-57548d458d-2snwf" Nov 28 11:25:36 crc kubenswrapper[4923]: I1128 11:25:36.171626 4923 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 11:25:37 crc kubenswrapper[4923]: I1128 11:25:37.102539 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-zl5xb" event={"ID":"44d18a2d-97f3-4e4a-82bf-6de8634c7585","Type":"ContainerStarted","Data":"f9c46dfa5ccc9ef1a995b6ae44c2699796fc2628d6cd153cd72d7b7ea54ec3e9"} Nov 28 11:25:37 crc kubenswrapper[4923]: I1128 11:25:37.125903 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-zl5xb" podStartSLOduration=2.479688059 podStartE2EDuration="1m3.125880552s" podCreationTimestamp="2025-11-28 11:24:34 +0000 UTC" firstStartedPulling="2025-11-28 11:24:36.080057282 +0000 UTC m=+955.208741492" lastFinishedPulling="2025-11-28 11:25:36.726249765 +0000 UTC m=+1015.854933985" observedRunningTime="2025-11-28 11:25:37.123424162 +0000 UTC m=+1016.252108412" watchObservedRunningTime="2025-11-28 11:25:37.125880552 +0000 UTC m=+1016.254564772" Nov 28 11:25:52 crc kubenswrapper[4923]: I1128 11:25:52.875852 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-7rsr2"] Nov 28 11:25:52 crc kubenswrapper[4923]: E1128 11:25:52.876559 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6cf5f23b-f4d3-4c24-ac00-26588d4bb3ab" containerName="registry-server" Nov 28 11:25:52 crc kubenswrapper[4923]: I1128 11:25:52.876571 4923 state_mem.go:107] "Deleted CPUSet 
assignment" podUID="6cf5f23b-f4d3-4c24-ac00-26588d4bb3ab" containerName="registry-server" Nov 28 11:25:52 crc kubenswrapper[4923]: E1128 11:25:52.876607 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6cf5f23b-f4d3-4c24-ac00-26588d4bb3ab" containerName="extract-content" Nov 28 11:25:52 crc kubenswrapper[4923]: I1128 11:25:52.876612 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="6cf5f23b-f4d3-4c24-ac00-26588d4bb3ab" containerName="extract-content" Nov 28 11:25:52 crc kubenswrapper[4923]: E1128 11:25:52.876623 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6cf5f23b-f4d3-4c24-ac00-26588d4bb3ab" containerName="extract-utilities" Nov 28 11:25:52 crc kubenswrapper[4923]: I1128 11:25:52.876631 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="6cf5f23b-f4d3-4c24-ac00-26588d4bb3ab" containerName="extract-utilities" Nov 28 11:25:52 crc kubenswrapper[4923]: I1128 11:25:52.876772 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="6cf5f23b-f4d3-4c24-ac00-26588d4bb3ab" containerName="registry-server" Nov 28 11:25:52 crc kubenswrapper[4923]: I1128 11:25:52.877434 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-7rsr2" Nov 28 11:25:52 crc kubenswrapper[4923]: I1128 11:25:52.894311 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns" Nov 28 11:25:52 crc kubenswrapper[4923]: I1128 11:25:52.894720 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt" Nov 28 11:25:52 crc kubenswrapper[4923]: I1128 11:25:52.894945 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-d9ll4" Nov 28 11:25:52 crc kubenswrapper[4923]: I1128 11:25:52.895140 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt" Nov 28 11:25:52 crc kubenswrapper[4923]: I1128 11:25:52.905628 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-7rsr2"] Nov 28 11:25:53 crc kubenswrapper[4923]: I1128 11:25:53.033479 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-7rt69"] Nov 28 11:25:53 crc kubenswrapper[4923]: I1128 11:25:53.037596 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-7rt69" Nov 28 11:25:53 crc kubenswrapper[4923]: I1128 11:25:53.051855 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cmklh\" (UniqueName: \"kubernetes.io/projected/06bf7bf6-2a65-40b4-9592-a942fb92d473-kube-api-access-cmklh\") pod \"dnsmasq-dns-675f4bcbfc-7rsr2\" (UID: \"06bf7bf6-2a65-40b4-9592-a942fb92d473\") " pod="openstack/dnsmasq-dns-675f4bcbfc-7rsr2" Nov 28 11:25:53 crc kubenswrapper[4923]: I1128 11:25:53.051960 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/06bf7bf6-2a65-40b4-9592-a942fb92d473-config\") pod \"dnsmasq-dns-675f4bcbfc-7rsr2\" (UID: \"06bf7bf6-2a65-40b4-9592-a942fb92d473\") " pod="openstack/dnsmasq-dns-675f4bcbfc-7rsr2" Nov 28 11:25:53 crc kubenswrapper[4923]: I1128 11:25:53.059429 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc" Nov 28 11:25:53 crc kubenswrapper[4923]: I1128 11:25:53.083800 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-7rt69"] Nov 28 11:25:53 crc kubenswrapper[4923]: I1128 11:25:53.153768 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bff70415-e77c-4c9f-8955-f074232c0564-config\") pod \"dnsmasq-dns-78dd6ddcc-7rt69\" (UID: \"bff70415-e77c-4c9f-8955-f074232c0564\") " pod="openstack/dnsmasq-dns-78dd6ddcc-7rt69" Nov 28 11:25:53 crc kubenswrapper[4923]: I1128 11:25:53.153833 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rbww4\" (UniqueName: \"kubernetes.io/projected/bff70415-e77c-4c9f-8955-f074232c0564-kube-api-access-rbww4\") pod \"dnsmasq-dns-78dd6ddcc-7rt69\" (UID: \"bff70415-e77c-4c9f-8955-f074232c0564\") " pod="openstack/dnsmasq-dns-78dd6ddcc-7rt69" Nov 28 11:25:53 crc kubenswrapper[4923]: I1128 11:25:53.153872 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/06bf7bf6-2a65-40b4-9592-a942fb92d473-config\") pod \"dnsmasq-dns-675f4bcbfc-7rsr2\" (UID: \"06bf7bf6-2a65-40b4-9592-a942fb92d473\") " pod="openstack/dnsmasq-dns-675f4bcbfc-7rsr2" Nov 28 11:25:53 crc kubenswrapper[4923]: I1128 11:25:53.153909 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bff70415-e77c-4c9f-8955-f074232c0564-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-7rt69\" (UID: \"bff70415-e77c-4c9f-8955-f074232c0564\") " pod="openstack/dnsmasq-dns-78dd6ddcc-7rt69" Nov 28 11:25:53 crc kubenswrapper[4923]: I1128 11:25:53.153967 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cmklh\" (UniqueName: \"kubernetes.io/projected/06bf7bf6-2a65-40b4-9592-a942fb92d473-kube-api-access-cmklh\") pod \"dnsmasq-dns-675f4bcbfc-7rsr2\" (UID: \"06bf7bf6-2a65-40b4-9592-a942fb92d473\") " pod="openstack/dnsmasq-dns-675f4bcbfc-7rsr2" Nov 28 11:25:53 crc kubenswrapper[4923]: I1128 11:25:53.155186 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/06bf7bf6-2a65-40b4-9592-a942fb92d473-config\") pod \"dnsmasq-dns-675f4bcbfc-7rsr2\" (UID: \"06bf7bf6-2a65-40b4-9592-a942fb92d473\") " pod="openstack/dnsmasq-dns-675f4bcbfc-7rsr2" Nov 28 
11:25:53 crc kubenswrapper[4923]: I1128 11:25:53.190916 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cmklh\" (UniqueName: \"kubernetes.io/projected/06bf7bf6-2a65-40b4-9592-a942fb92d473-kube-api-access-cmklh\") pod \"dnsmasq-dns-675f4bcbfc-7rsr2\" (UID: \"06bf7bf6-2a65-40b4-9592-a942fb92d473\") " pod="openstack/dnsmasq-dns-675f4bcbfc-7rsr2" Nov 28 11:25:53 crc kubenswrapper[4923]: I1128 11:25:53.193324 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-7rsr2" Nov 28 11:25:53 crc kubenswrapper[4923]: I1128 11:25:53.254888 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bff70415-e77c-4c9f-8955-f074232c0564-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-7rt69\" (UID: \"bff70415-e77c-4c9f-8955-f074232c0564\") " pod="openstack/dnsmasq-dns-78dd6ddcc-7rt69" Nov 28 11:25:53 crc kubenswrapper[4923]: I1128 11:25:53.255193 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bff70415-e77c-4c9f-8955-f074232c0564-config\") pod \"dnsmasq-dns-78dd6ddcc-7rt69\" (UID: \"bff70415-e77c-4c9f-8955-f074232c0564\") " pod="openstack/dnsmasq-dns-78dd6ddcc-7rt69" Nov 28 11:25:53 crc kubenswrapper[4923]: I1128 11:25:53.255230 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rbww4\" (UniqueName: \"kubernetes.io/projected/bff70415-e77c-4c9f-8955-f074232c0564-kube-api-access-rbww4\") pod \"dnsmasq-dns-78dd6ddcc-7rt69\" (UID: \"bff70415-e77c-4c9f-8955-f074232c0564\") " pod="openstack/dnsmasq-dns-78dd6ddcc-7rt69" Nov 28 11:25:53 crc kubenswrapper[4923]: I1128 11:25:53.256089 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bff70415-e77c-4c9f-8955-f074232c0564-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-7rt69\" (UID: \"bff70415-e77c-4c9f-8955-f074232c0564\") " pod="openstack/dnsmasq-dns-78dd6ddcc-7rt69" Nov 28 11:25:53 crc kubenswrapper[4923]: I1128 11:25:53.256308 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bff70415-e77c-4c9f-8955-f074232c0564-config\") pod \"dnsmasq-dns-78dd6ddcc-7rt69\" (UID: \"bff70415-e77c-4c9f-8955-f074232c0564\") " pod="openstack/dnsmasq-dns-78dd6ddcc-7rt69" Nov 28 11:25:53 crc kubenswrapper[4923]: I1128 11:25:53.279636 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rbww4\" (UniqueName: \"kubernetes.io/projected/bff70415-e77c-4c9f-8955-f074232c0564-kube-api-access-rbww4\") pod \"dnsmasq-dns-78dd6ddcc-7rt69\" (UID: \"bff70415-e77c-4c9f-8955-f074232c0564\") " pod="openstack/dnsmasq-dns-78dd6ddcc-7rt69" Nov 28 11:25:53 crc kubenswrapper[4923]: I1128 11:25:53.423901 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-7rt69" Nov 28 11:25:53 crc kubenswrapper[4923]: I1128 11:25:53.626803 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-7rt69"] Nov 28 11:25:53 crc kubenswrapper[4923]: I1128 11:25:53.634677 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-7rsr2"] Nov 28 11:25:53 crc kubenswrapper[4923]: W1128 11:25:53.637169 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod06bf7bf6_2a65_40b4_9592_a942fb92d473.slice/crio-1fd9827d51ba21978630b4fbf0e576506296c926fcaec2dd95d7ec9b43c6ecc4 WatchSource:0}: Error finding container 1fd9827d51ba21978630b4fbf0e576506296c926fcaec2dd95d7ec9b43c6ecc4: Status 404 returned error can't find the container with id 1fd9827d51ba21978630b4fbf0e576506296c926fcaec2dd95d7ec9b43c6ecc4 Nov 28 11:25:53 crc kubenswrapper[4923]: W1128 11:25:53.638265 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbff70415_e77c_4c9f_8955_f074232c0564.slice/crio-8d662a06e839abc4751e184c7509640437eb9467275c10a545a62d14bfd4a574 WatchSource:0}: Error finding container 8d662a06e839abc4751e184c7509640437eb9467275c10a545a62d14bfd4a574: Status 404 returned error can't find the container with id 8d662a06e839abc4751e184c7509640437eb9467275c10a545a62d14bfd4a574 Nov 28 11:25:54 crc kubenswrapper[4923]: I1128 11:25:54.254154 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-7rt69" event={"ID":"bff70415-e77c-4c9f-8955-f074232c0564","Type":"ContainerStarted","Data":"8d662a06e839abc4751e184c7509640437eb9467275c10a545a62d14bfd4a574"} Nov 28 11:25:54 crc kubenswrapper[4923]: I1128 11:25:54.255667 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-7rsr2" event={"ID":"06bf7bf6-2a65-40b4-9592-a942fb92d473","Type":"ContainerStarted","Data":"1fd9827d51ba21978630b4fbf0e576506296c926fcaec2dd95d7ec9b43c6ecc4"} Nov 28 11:25:55 crc kubenswrapper[4923]: I1128 11:25:55.861336 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-7rsr2"] Nov 28 11:25:55 crc kubenswrapper[4923]: I1128 11:25:55.905897 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5ccc8479f9-shqft"] Nov 28 11:25:55 crc kubenswrapper[4923]: I1128 11:25:55.906974 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5ccc8479f9-shqft" Nov 28 11:25:55 crc kubenswrapper[4923]: I1128 11:25:55.938864 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5ccc8479f9-shqft"] Nov 28 11:25:56 crc kubenswrapper[4923]: I1128 11:25:56.011118 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/69a9b4ec-46dc-427a-9398-658ec781a88c-config\") pod \"dnsmasq-dns-5ccc8479f9-shqft\" (UID: \"69a9b4ec-46dc-427a-9398-658ec781a88c\") " pod="openstack/dnsmasq-dns-5ccc8479f9-shqft" Nov 28 11:25:56 crc kubenswrapper[4923]: I1128 11:25:56.011169 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l9fs9\" (UniqueName: \"kubernetes.io/projected/69a9b4ec-46dc-427a-9398-658ec781a88c-kube-api-access-l9fs9\") pod \"dnsmasq-dns-5ccc8479f9-shqft\" (UID: \"69a9b4ec-46dc-427a-9398-658ec781a88c\") " pod="openstack/dnsmasq-dns-5ccc8479f9-shqft" Nov 28 11:25:56 crc kubenswrapper[4923]: I1128 11:25:56.011235 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/69a9b4ec-46dc-427a-9398-658ec781a88c-dns-svc\") pod \"dnsmasq-dns-5ccc8479f9-shqft\" (UID: \"69a9b4ec-46dc-427a-9398-658ec781a88c\") " pod="openstack/dnsmasq-dns-5ccc8479f9-shqft" Nov 28 11:25:56 crc kubenswrapper[4923]: I1128 11:25:56.112576 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/69a9b4ec-46dc-427a-9398-658ec781a88c-dns-svc\") pod \"dnsmasq-dns-5ccc8479f9-shqft\" (UID: \"69a9b4ec-46dc-427a-9398-658ec781a88c\") " pod="openstack/dnsmasq-dns-5ccc8479f9-shqft" Nov 28 11:25:56 crc kubenswrapper[4923]: I1128 11:25:56.112636 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/69a9b4ec-46dc-427a-9398-658ec781a88c-config\") pod \"dnsmasq-dns-5ccc8479f9-shqft\" (UID: \"69a9b4ec-46dc-427a-9398-658ec781a88c\") " pod="openstack/dnsmasq-dns-5ccc8479f9-shqft" Nov 28 11:25:56 crc kubenswrapper[4923]: I1128 11:25:56.112671 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l9fs9\" (UniqueName: \"kubernetes.io/projected/69a9b4ec-46dc-427a-9398-658ec781a88c-kube-api-access-l9fs9\") pod \"dnsmasq-dns-5ccc8479f9-shqft\" (UID: \"69a9b4ec-46dc-427a-9398-658ec781a88c\") " pod="openstack/dnsmasq-dns-5ccc8479f9-shqft" Nov 28 11:25:56 crc kubenswrapper[4923]: I1128 11:25:56.114682 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/69a9b4ec-46dc-427a-9398-658ec781a88c-dns-svc\") pod \"dnsmasq-dns-5ccc8479f9-shqft\" (UID: \"69a9b4ec-46dc-427a-9398-658ec781a88c\") " pod="openstack/dnsmasq-dns-5ccc8479f9-shqft" Nov 28 11:25:56 crc kubenswrapper[4923]: I1128 11:25:56.115435 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/69a9b4ec-46dc-427a-9398-658ec781a88c-config\") pod \"dnsmasq-dns-5ccc8479f9-shqft\" (UID: \"69a9b4ec-46dc-427a-9398-658ec781a88c\") " pod="openstack/dnsmasq-dns-5ccc8479f9-shqft" Nov 28 11:25:56 crc kubenswrapper[4923]: I1128 11:25:56.156516 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l9fs9\" (UniqueName: 
\"kubernetes.io/projected/69a9b4ec-46dc-427a-9398-658ec781a88c-kube-api-access-l9fs9\") pod \"dnsmasq-dns-5ccc8479f9-shqft\" (UID: \"69a9b4ec-46dc-427a-9398-658ec781a88c\") " pod="openstack/dnsmasq-dns-5ccc8479f9-shqft" Nov 28 11:25:56 crc kubenswrapper[4923]: I1128 11:25:56.224631 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5ccc8479f9-shqft" Nov 28 11:25:56 crc kubenswrapper[4923]: I1128 11:25:56.453640 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-7rt69"] Nov 28 11:25:56 crc kubenswrapper[4923]: I1128 11:25:56.552442 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-z5rmn"] Nov 28 11:25:56 crc kubenswrapper[4923]: I1128 11:25:56.554907 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-z5rmn" Nov 28 11:25:56 crc kubenswrapper[4923]: I1128 11:25:56.571524 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-z5rmn"] Nov 28 11:25:56 crc kubenswrapper[4923]: I1128 11:25:56.727493 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d4882651-e2b0-4ab5-911f-3f1755c56d18-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-z5rmn\" (UID: \"d4882651-e2b0-4ab5-911f-3f1755c56d18\") " pod="openstack/dnsmasq-dns-57d769cc4f-z5rmn" Nov 28 11:25:56 crc kubenswrapper[4923]: I1128 11:25:56.727776 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d4882651-e2b0-4ab5-911f-3f1755c56d18-config\") pod \"dnsmasq-dns-57d769cc4f-z5rmn\" (UID: \"d4882651-e2b0-4ab5-911f-3f1755c56d18\") " pod="openstack/dnsmasq-dns-57d769cc4f-z5rmn" Nov 28 11:25:56 crc kubenswrapper[4923]: I1128 11:25:56.727793 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n2snr\" (UniqueName: \"kubernetes.io/projected/d4882651-e2b0-4ab5-911f-3f1755c56d18-kube-api-access-n2snr\") pod \"dnsmasq-dns-57d769cc4f-z5rmn\" (UID: \"d4882651-e2b0-4ab5-911f-3f1755c56d18\") " pod="openstack/dnsmasq-dns-57d769cc4f-z5rmn" Nov 28 11:25:56 crc kubenswrapper[4923]: I1128 11:25:56.798612 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5ccc8479f9-shqft"] Nov 28 11:25:56 crc kubenswrapper[4923]: I1128 11:25:56.829760 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d4882651-e2b0-4ab5-911f-3f1755c56d18-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-z5rmn\" (UID: \"d4882651-e2b0-4ab5-911f-3f1755c56d18\") " pod="openstack/dnsmasq-dns-57d769cc4f-z5rmn" Nov 28 11:25:56 crc kubenswrapper[4923]: I1128 11:25:56.829833 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d4882651-e2b0-4ab5-911f-3f1755c56d18-config\") pod \"dnsmasq-dns-57d769cc4f-z5rmn\" (UID: \"d4882651-e2b0-4ab5-911f-3f1755c56d18\") " pod="openstack/dnsmasq-dns-57d769cc4f-z5rmn" Nov 28 11:25:56 crc kubenswrapper[4923]: I1128 11:25:56.829853 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n2snr\" (UniqueName: \"kubernetes.io/projected/d4882651-e2b0-4ab5-911f-3f1755c56d18-kube-api-access-n2snr\") pod \"dnsmasq-dns-57d769cc4f-z5rmn\" (UID: \"d4882651-e2b0-4ab5-911f-3f1755c56d18\") " 
pod="openstack/dnsmasq-dns-57d769cc4f-z5rmn" Nov 28 11:25:56 crc kubenswrapper[4923]: I1128 11:25:56.830583 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d4882651-e2b0-4ab5-911f-3f1755c56d18-config\") pod \"dnsmasq-dns-57d769cc4f-z5rmn\" (UID: \"d4882651-e2b0-4ab5-911f-3f1755c56d18\") " pod="openstack/dnsmasq-dns-57d769cc4f-z5rmn" Nov 28 11:25:56 crc kubenswrapper[4923]: I1128 11:25:56.831093 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d4882651-e2b0-4ab5-911f-3f1755c56d18-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-z5rmn\" (UID: \"d4882651-e2b0-4ab5-911f-3f1755c56d18\") " pod="openstack/dnsmasq-dns-57d769cc4f-z5rmn" Nov 28 11:25:56 crc kubenswrapper[4923]: I1128 11:25:56.864397 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n2snr\" (UniqueName: \"kubernetes.io/projected/d4882651-e2b0-4ab5-911f-3f1755c56d18-kube-api-access-n2snr\") pod \"dnsmasq-dns-57d769cc4f-z5rmn\" (UID: \"d4882651-e2b0-4ab5-911f-3f1755c56d18\") " pod="openstack/dnsmasq-dns-57d769cc4f-z5rmn" Nov 28 11:25:56 crc kubenswrapper[4923]: I1128 11:25:56.884201 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-z5rmn" Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.144524 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.148755 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.153998 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.154194 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.154296 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.154984 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.155089 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-5xc6j" Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.155180 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.155311 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.191178 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.235215 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"456d70c2-443b-455b-83fe-fc87e36534ac\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.235254 4923 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/456d70c2-443b-455b-83fe-fc87e36534ac-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"456d70c2-443b-455b-83fe-fc87e36534ac\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.235289 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/456d70c2-443b-455b-83fe-fc87e36534ac-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"456d70c2-443b-455b-83fe-fc87e36534ac\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.235305 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/456d70c2-443b-455b-83fe-fc87e36534ac-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"456d70c2-443b-455b-83fe-fc87e36534ac\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.235321 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/456d70c2-443b-455b-83fe-fc87e36534ac-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"456d70c2-443b-455b-83fe-fc87e36534ac\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.235346 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/456d70c2-443b-455b-83fe-fc87e36534ac-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"456d70c2-443b-455b-83fe-fc87e36534ac\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.235374 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/456d70c2-443b-455b-83fe-fc87e36534ac-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"456d70c2-443b-455b-83fe-fc87e36534ac\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.235397 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mkq5z\" (UniqueName: \"kubernetes.io/projected/456d70c2-443b-455b-83fe-fc87e36534ac-kube-api-access-mkq5z\") pod \"rabbitmq-cell1-server-0\" (UID: \"456d70c2-443b-455b-83fe-fc87e36534ac\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.235442 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/456d70c2-443b-455b-83fe-fc87e36534ac-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"456d70c2-443b-455b-83fe-fc87e36534ac\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.235464 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/456d70c2-443b-455b-83fe-fc87e36534ac-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"456d70c2-443b-455b-83fe-fc87e36534ac\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 
11:25:57.235481 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/456d70c2-443b-455b-83fe-fc87e36534ac-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"456d70c2-443b-455b-83fe-fc87e36534ac\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.336948 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"456d70c2-443b-455b-83fe-fc87e36534ac\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.336997 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/456d70c2-443b-455b-83fe-fc87e36534ac-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"456d70c2-443b-455b-83fe-fc87e36534ac\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.337026 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/456d70c2-443b-455b-83fe-fc87e36534ac-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"456d70c2-443b-455b-83fe-fc87e36534ac\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.337044 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/456d70c2-443b-455b-83fe-fc87e36534ac-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"456d70c2-443b-455b-83fe-fc87e36534ac\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.337062 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/456d70c2-443b-455b-83fe-fc87e36534ac-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"456d70c2-443b-455b-83fe-fc87e36534ac\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.337083 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/456d70c2-443b-455b-83fe-fc87e36534ac-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"456d70c2-443b-455b-83fe-fc87e36534ac\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.337109 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/456d70c2-443b-455b-83fe-fc87e36534ac-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"456d70c2-443b-455b-83fe-fc87e36534ac\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.337130 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mkq5z\" (UniqueName: \"kubernetes.io/projected/456d70c2-443b-455b-83fe-fc87e36534ac-kube-api-access-mkq5z\") pod \"rabbitmq-cell1-server-0\" (UID: \"456d70c2-443b-455b-83fe-fc87e36534ac\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.337172 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: 
\"kubernetes.io/projected/456d70c2-443b-455b-83fe-fc87e36534ac-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"456d70c2-443b-455b-83fe-fc87e36534ac\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.337192 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/456d70c2-443b-455b-83fe-fc87e36534ac-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"456d70c2-443b-455b-83fe-fc87e36534ac\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.337209 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/456d70c2-443b-455b-83fe-fc87e36534ac-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"456d70c2-443b-455b-83fe-fc87e36534ac\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.337323 4923 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"456d70c2-443b-455b-83fe-fc87e36534ac\") device mount path \"/mnt/openstack/pv12\"" pod="openstack/rabbitmq-cell1-server-0" Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.340633 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/456d70c2-443b-455b-83fe-fc87e36534ac-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"456d70c2-443b-455b-83fe-fc87e36534ac\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.341609 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/456d70c2-443b-455b-83fe-fc87e36534ac-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"456d70c2-443b-455b-83fe-fc87e36534ac\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.341723 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/456d70c2-443b-455b-83fe-fc87e36534ac-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"456d70c2-443b-455b-83fe-fc87e36534ac\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.344187 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/456d70c2-443b-455b-83fe-fc87e36534ac-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"456d70c2-443b-455b-83fe-fc87e36534ac\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.346298 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/456d70c2-443b-455b-83fe-fc87e36534ac-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"456d70c2-443b-455b-83fe-fc87e36534ac\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.347320 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/456d70c2-443b-455b-83fe-fc87e36534ac-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"456d70c2-443b-455b-83fe-fc87e36534ac\") " 
pod="openstack/rabbitmq-cell1-server-0" Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.348745 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/456d70c2-443b-455b-83fe-fc87e36534ac-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"456d70c2-443b-455b-83fe-fc87e36534ac\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.356926 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mkq5z\" (UniqueName: \"kubernetes.io/projected/456d70c2-443b-455b-83fe-fc87e36534ac-kube-api-access-mkq5z\") pod \"rabbitmq-cell1-server-0\" (UID: \"456d70c2-443b-455b-83fe-fc87e36534ac\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.357360 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/456d70c2-443b-455b-83fe-fc87e36534ac-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"456d70c2-443b-455b-83fe-fc87e36534ac\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.358434 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/456d70c2-443b-455b-83fe-fc87e36534ac-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"456d70c2-443b-455b-83fe-fc87e36534ac\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.366067 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-z5rmn"] Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.366288 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5ccc8479f9-shqft" event={"ID":"69a9b4ec-46dc-427a-9398-658ec781a88c","Type":"ContainerStarted","Data":"b1185c328e52ddb168714df20b1dd82f7d105662a66a5efbc5e9cbb4acf13f17"} Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.376390 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"456d70c2-443b-455b-83fe-fc87e36534ac\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.492007 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.651999 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.657761 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.661699 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-gslqj" Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.662060 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.662157 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.662352 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.663423 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.663658 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.663782 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.671241 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.770847 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/26a4b167-a30a-4655-80aa-2177fe14784c-config-data\") pod \"rabbitmq-server-0\" (UID: \"26a4b167-a30a-4655-80aa-2177fe14784c\") " pod="openstack/rabbitmq-server-0" Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.770899 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xkpzp\" (UniqueName: \"kubernetes.io/projected/26a4b167-a30a-4655-80aa-2177fe14784c-kube-api-access-xkpzp\") pod \"rabbitmq-server-0\" (UID: \"26a4b167-a30a-4655-80aa-2177fe14784c\") " pod="openstack/rabbitmq-server-0" Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.770951 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/26a4b167-a30a-4655-80aa-2177fe14784c-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"26a4b167-a30a-4655-80aa-2177fe14784c\") " pod="openstack/rabbitmq-server-0" Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.770973 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/26a4b167-a30a-4655-80aa-2177fe14784c-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"26a4b167-a30a-4655-80aa-2177fe14784c\") " pod="openstack/rabbitmq-server-0" Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.771015 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/26a4b167-a30a-4655-80aa-2177fe14784c-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"26a4b167-a30a-4655-80aa-2177fe14784c\") " pod="openstack/rabbitmq-server-0" Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.771033 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: 
\"kubernetes.io/configmap/26a4b167-a30a-4655-80aa-2177fe14784c-server-conf\") pod \"rabbitmq-server-0\" (UID: \"26a4b167-a30a-4655-80aa-2177fe14784c\") " pod="openstack/rabbitmq-server-0" Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.771066 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/26a4b167-a30a-4655-80aa-2177fe14784c-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"26a4b167-a30a-4655-80aa-2177fe14784c\") " pod="openstack/rabbitmq-server-0" Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.771105 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/26a4b167-a30a-4655-80aa-2177fe14784c-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"26a4b167-a30a-4655-80aa-2177fe14784c\") " pod="openstack/rabbitmq-server-0" Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.771142 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/26a4b167-a30a-4655-80aa-2177fe14784c-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"26a4b167-a30a-4655-80aa-2177fe14784c\") " pod="openstack/rabbitmq-server-0" Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.771158 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/26a4b167-a30a-4655-80aa-2177fe14784c-pod-info\") pod \"rabbitmq-server-0\" (UID: \"26a4b167-a30a-4655-80aa-2177fe14784c\") " pod="openstack/rabbitmq-server-0" Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.771180 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-server-0\" (UID: \"26a4b167-a30a-4655-80aa-2177fe14784c\") " pod="openstack/rabbitmq-server-0" Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.872131 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/26a4b167-a30a-4655-80aa-2177fe14784c-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"26a4b167-a30a-4655-80aa-2177fe14784c\") " pod="openstack/rabbitmq-server-0" Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.872166 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/26a4b167-a30a-4655-80aa-2177fe14784c-pod-info\") pod \"rabbitmq-server-0\" (UID: \"26a4b167-a30a-4655-80aa-2177fe14784c\") " pod="openstack/rabbitmq-server-0" Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.872200 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-server-0\" (UID: \"26a4b167-a30a-4655-80aa-2177fe14784c\") " pod="openstack/rabbitmq-server-0" Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.872225 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/26a4b167-a30a-4655-80aa-2177fe14784c-config-data\") pod \"rabbitmq-server-0\" (UID: \"26a4b167-a30a-4655-80aa-2177fe14784c\") " pod="openstack/rabbitmq-server-0" Nov 28 11:25:57 crc 
kubenswrapper[4923]: I1128 11:25:57.872250 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xkpzp\" (UniqueName: \"kubernetes.io/projected/26a4b167-a30a-4655-80aa-2177fe14784c-kube-api-access-xkpzp\") pod \"rabbitmq-server-0\" (UID: \"26a4b167-a30a-4655-80aa-2177fe14784c\") " pod="openstack/rabbitmq-server-0" Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.872271 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/26a4b167-a30a-4655-80aa-2177fe14784c-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"26a4b167-a30a-4655-80aa-2177fe14784c\") " pod="openstack/rabbitmq-server-0" Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.872298 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/26a4b167-a30a-4655-80aa-2177fe14784c-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"26a4b167-a30a-4655-80aa-2177fe14784c\") " pod="openstack/rabbitmq-server-0" Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.872345 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/26a4b167-a30a-4655-80aa-2177fe14784c-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"26a4b167-a30a-4655-80aa-2177fe14784c\") " pod="openstack/rabbitmq-server-0" Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.872366 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/26a4b167-a30a-4655-80aa-2177fe14784c-server-conf\") pod \"rabbitmq-server-0\" (UID: \"26a4b167-a30a-4655-80aa-2177fe14784c\") " pod="openstack/rabbitmq-server-0" Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.872405 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/26a4b167-a30a-4655-80aa-2177fe14784c-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"26a4b167-a30a-4655-80aa-2177fe14784c\") " pod="openstack/rabbitmq-server-0" Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.872428 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/26a4b167-a30a-4655-80aa-2177fe14784c-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"26a4b167-a30a-4655-80aa-2177fe14784c\") " pod="openstack/rabbitmq-server-0" Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.876485 4923 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-server-0\" (UID: \"26a4b167-a30a-4655-80aa-2177fe14784c\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/rabbitmq-server-0" Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.877779 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/26a4b167-a30a-4655-80aa-2177fe14784c-config-data\") pod \"rabbitmq-server-0\" (UID: \"26a4b167-a30a-4655-80aa-2177fe14784c\") " pod="openstack/rabbitmq-server-0" Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.878144 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: 
\"kubernetes.io/empty-dir/26a4b167-a30a-4655-80aa-2177fe14784c-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"26a4b167-a30a-4655-80aa-2177fe14784c\") " pod="openstack/rabbitmq-server-0" Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.878377 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/26a4b167-a30a-4655-80aa-2177fe14784c-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"26a4b167-a30a-4655-80aa-2177fe14784c\") " pod="openstack/rabbitmq-server-0" Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.879158 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/26a4b167-a30a-4655-80aa-2177fe14784c-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"26a4b167-a30a-4655-80aa-2177fe14784c\") " pod="openstack/rabbitmq-server-0" Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.881031 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/26a4b167-a30a-4655-80aa-2177fe14784c-server-conf\") pod \"rabbitmq-server-0\" (UID: \"26a4b167-a30a-4655-80aa-2177fe14784c\") " pod="openstack/rabbitmq-server-0" Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.884888 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/26a4b167-a30a-4655-80aa-2177fe14784c-pod-info\") pod \"rabbitmq-server-0\" (UID: \"26a4b167-a30a-4655-80aa-2177fe14784c\") " pod="openstack/rabbitmq-server-0" Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.885452 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/26a4b167-a30a-4655-80aa-2177fe14784c-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"26a4b167-a30a-4655-80aa-2177fe14784c\") " pod="openstack/rabbitmq-server-0" Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.887036 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/26a4b167-a30a-4655-80aa-2177fe14784c-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"26a4b167-a30a-4655-80aa-2177fe14784c\") " pod="openstack/rabbitmq-server-0" Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.890386 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xkpzp\" (UniqueName: \"kubernetes.io/projected/26a4b167-a30a-4655-80aa-2177fe14784c-kube-api-access-xkpzp\") pod \"rabbitmq-server-0\" (UID: \"26a4b167-a30a-4655-80aa-2177fe14784c\") " pod="openstack/rabbitmq-server-0" Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.916847 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/26a4b167-a30a-4655-80aa-2177fe14784c-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"26a4b167-a30a-4655-80aa-2177fe14784c\") " pod="openstack/rabbitmq-server-0" Nov 28 11:25:57 crc kubenswrapper[4923]: I1128 11:25:57.940430 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-server-0\" (UID: \"26a4b167-a30a-4655-80aa-2177fe14784c\") " pod="openstack/rabbitmq-server-0" Nov 28 11:25:58 crc kubenswrapper[4923]: I1128 11:25:58.006223 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 28 11:25:58 crc kubenswrapper[4923]: I1128 11:25:58.093287 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 28 11:25:58 crc kubenswrapper[4923]: I1128 11:25:58.375997 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-z5rmn" event={"ID":"d4882651-e2b0-4ab5-911f-3f1755c56d18","Type":"ContainerStarted","Data":"cdf7b68abfee4906f81a2f2139bede7f21cb1f10cfbd0cd4fe58a286d52f798f"} Nov 28 11:25:58 crc kubenswrapper[4923]: I1128 11:25:58.383050 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"456d70c2-443b-455b-83fe-fc87e36534ac","Type":"ContainerStarted","Data":"5ffc4ee04064bb80be9d39e3dc205bb638951a4d3a75fe73e58349463458df24"} Nov 28 11:25:58 crc kubenswrapper[4923]: I1128 11:25:58.770595 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 28 11:25:58 crc kubenswrapper[4923]: W1128 11:25:58.787171 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod26a4b167_a30a_4655_80aa_2177fe14784c.slice/crio-4e605008a9e35bfdf40121092457048041106e5fe3729ef178959cc3b538e626 WatchSource:0}: Error finding container 4e605008a9e35bfdf40121092457048041106e5fe3729ef178959cc3b538e626: Status 404 returned error can't find the container with id 4e605008a9e35bfdf40121092457048041106e5fe3729ef178959cc3b538e626 Nov 28 11:25:59 crc kubenswrapper[4923]: I1128 11:25:59.086098 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"] Nov 28 11:25:59 crc kubenswrapper[4923]: I1128 11:25:59.088137 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Nov 28 11:25:59 crc kubenswrapper[4923]: I1128 11:25:59.090213 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-gvjpv" Nov 28 11:25:59 crc kubenswrapper[4923]: I1128 11:25:59.104752 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Nov 28 11:25:59 crc kubenswrapper[4923]: I1128 11:25:59.131691 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts" Nov 28 11:25:59 crc kubenswrapper[4923]: I1128 11:25:59.163479 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data" Nov 28 11:25:59 crc kubenswrapper[4923]: I1128 11:25:59.168387 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc" Nov 28 11:25:59 crc kubenswrapper[4923]: I1128 11:25:59.169048 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle" Nov 28 11:25:59 crc kubenswrapper[4923]: I1128 11:25:59.207201 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b55ac65c-e6ce-46ea-83cc-83afef1efcf9-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"b55ac65c-e6ce-46ea-83cc-83afef1efcf9\") " pod="openstack/openstack-galera-0" Nov 28 11:25:59 crc kubenswrapper[4923]: I1128 11:25:59.207246 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/b55ac65c-e6ce-46ea-83cc-83afef1efcf9-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"b55ac65c-e6ce-46ea-83cc-83afef1efcf9\") " pod="openstack/openstack-galera-0" Nov 28 11:25:59 crc kubenswrapper[4923]: I1128 11:25:59.207273 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/b55ac65c-e6ce-46ea-83cc-83afef1efcf9-kolla-config\") pod \"openstack-galera-0\" (UID: \"b55ac65c-e6ce-46ea-83cc-83afef1efcf9\") " pod="openstack/openstack-galera-0" Nov 28 11:25:59 crc kubenswrapper[4923]: I1128 11:25:59.207341 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/b55ac65c-e6ce-46ea-83cc-83afef1efcf9-config-data-default\") pod \"openstack-galera-0\" (UID: \"b55ac65c-e6ce-46ea-83cc-83afef1efcf9\") " pod="openstack/openstack-galera-0" Nov 28 11:25:59 crc kubenswrapper[4923]: I1128 11:25:59.207404 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/b55ac65c-e6ce-46ea-83cc-83afef1efcf9-config-data-generated\") pod \"openstack-galera-0\" (UID: \"b55ac65c-e6ce-46ea-83cc-83afef1efcf9\") " pod="openstack/openstack-galera-0" Nov 28 11:25:59 crc kubenswrapper[4923]: I1128 11:25:59.207453 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"openstack-galera-0\" (UID: \"b55ac65c-e6ce-46ea-83cc-83afef1efcf9\") " pod="openstack/openstack-galera-0" Nov 28 11:25:59 crc kubenswrapper[4923]: I1128 11:25:59.207473 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-vpxgs\" (UniqueName: \"kubernetes.io/projected/b55ac65c-e6ce-46ea-83cc-83afef1efcf9-kube-api-access-vpxgs\") pod \"openstack-galera-0\" (UID: \"b55ac65c-e6ce-46ea-83cc-83afef1efcf9\") " pod="openstack/openstack-galera-0" Nov 28 11:25:59 crc kubenswrapper[4923]: I1128 11:25:59.207531 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b55ac65c-e6ce-46ea-83cc-83afef1efcf9-operator-scripts\") pod \"openstack-galera-0\" (UID: \"b55ac65c-e6ce-46ea-83cc-83afef1efcf9\") " pod="openstack/openstack-galera-0" Nov 28 11:25:59 crc kubenswrapper[4923]: I1128 11:25:59.308763 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/b55ac65c-e6ce-46ea-83cc-83afef1efcf9-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"b55ac65c-e6ce-46ea-83cc-83afef1efcf9\") " pod="openstack/openstack-galera-0" Nov 28 11:25:59 crc kubenswrapper[4923]: I1128 11:25:59.308821 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/b55ac65c-e6ce-46ea-83cc-83afef1efcf9-kolla-config\") pod \"openstack-galera-0\" (UID: \"b55ac65c-e6ce-46ea-83cc-83afef1efcf9\") " pod="openstack/openstack-galera-0" Nov 28 11:25:59 crc kubenswrapper[4923]: I1128 11:25:59.308865 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/b55ac65c-e6ce-46ea-83cc-83afef1efcf9-config-data-default\") pod \"openstack-galera-0\" (UID: \"b55ac65c-e6ce-46ea-83cc-83afef1efcf9\") " pod="openstack/openstack-galera-0" Nov 28 11:25:59 crc kubenswrapper[4923]: I1128 11:25:59.308892 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/b55ac65c-e6ce-46ea-83cc-83afef1efcf9-config-data-generated\") pod \"openstack-galera-0\" (UID: \"b55ac65c-e6ce-46ea-83cc-83afef1efcf9\") " pod="openstack/openstack-galera-0" Nov 28 11:25:59 crc kubenswrapper[4923]: I1128 11:25:59.308927 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"openstack-galera-0\" (UID: \"b55ac65c-e6ce-46ea-83cc-83afef1efcf9\") " pod="openstack/openstack-galera-0" Nov 28 11:25:59 crc kubenswrapper[4923]: I1128 11:25:59.308958 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vpxgs\" (UniqueName: \"kubernetes.io/projected/b55ac65c-e6ce-46ea-83cc-83afef1efcf9-kube-api-access-vpxgs\") pod \"openstack-galera-0\" (UID: \"b55ac65c-e6ce-46ea-83cc-83afef1efcf9\") " pod="openstack/openstack-galera-0" Nov 28 11:25:59 crc kubenswrapper[4923]: I1128 11:25:59.308996 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b55ac65c-e6ce-46ea-83cc-83afef1efcf9-operator-scripts\") pod \"openstack-galera-0\" (UID: \"b55ac65c-e6ce-46ea-83cc-83afef1efcf9\") " pod="openstack/openstack-galera-0" Nov 28 11:25:59 crc kubenswrapper[4923]: I1128 11:25:59.309031 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b55ac65c-e6ce-46ea-83cc-83afef1efcf9-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: 
\"b55ac65c-e6ce-46ea-83cc-83afef1efcf9\") " pod="openstack/openstack-galera-0" Nov 28 11:25:59 crc kubenswrapper[4923]: I1128 11:25:59.309796 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/b55ac65c-e6ce-46ea-83cc-83afef1efcf9-config-data-generated\") pod \"openstack-galera-0\" (UID: \"b55ac65c-e6ce-46ea-83cc-83afef1efcf9\") " pod="openstack/openstack-galera-0" Nov 28 11:25:59 crc kubenswrapper[4923]: I1128 11:25:59.310633 4923 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"openstack-galera-0\" (UID: \"b55ac65c-e6ce-46ea-83cc-83afef1efcf9\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/openstack-galera-0" Nov 28 11:25:59 crc kubenswrapper[4923]: I1128 11:25:59.310894 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/b55ac65c-e6ce-46ea-83cc-83afef1efcf9-config-data-default\") pod \"openstack-galera-0\" (UID: \"b55ac65c-e6ce-46ea-83cc-83afef1efcf9\") " pod="openstack/openstack-galera-0" Nov 28 11:25:59 crc kubenswrapper[4923]: I1128 11:25:59.311387 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/b55ac65c-e6ce-46ea-83cc-83afef1efcf9-kolla-config\") pod \"openstack-galera-0\" (UID: \"b55ac65c-e6ce-46ea-83cc-83afef1efcf9\") " pod="openstack/openstack-galera-0" Nov 28 11:25:59 crc kubenswrapper[4923]: I1128 11:25:59.311450 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b55ac65c-e6ce-46ea-83cc-83afef1efcf9-operator-scripts\") pod \"openstack-galera-0\" (UID: \"b55ac65c-e6ce-46ea-83cc-83afef1efcf9\") " pod="openstack/openstack-galera-0" Nov 28 11:25:59 crc kubenswrapper[4923]: I1128 11:25:59.368330 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/b55ac65c-e6ce-46ea-83cc-83afef1efcf9-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"b55ac65c-e6ce-46ea-83cc-83afef1efcf9\") " pod="openstack/openstack-galera-0" Nov 28 11:25:59 crc kubenswrapper[4923]: I1128 11:25:59.369591 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b55ac65c-e6ce-46ea-83cc-83afef1efcf9-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"b55ac65c-e6ce-46ea-83cc-83afef1efcf9\") " pod="openstack/openstack-galera-0" Nov 28 11:25:59 crc kubenswrapper[4923]: I1128 11:25:59.393121 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vpxgs\" (UniqueName: \"kubernetes.io/projected/b55ac65c-e6ce-46ea-83cc-83afef1efcf9-kube-api-access-vpxgs\") pod \"openstack-galera-0\" (UID: \"b55ac65c-e6ce-46ea-83cc-83afef1efcf9\") " pod="openstack/openstack-galera-0" Nov 28 11:25:59 crc kubenswrapper[4923]: I1128 11:25:59.404056 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"openstack-galera-0\" (UID: \"b55ac65c-e6ce-46ea-83cc-83afef1efcf9\") " pod="openstack/openstack-galera-0" Nov 28 11:25:59 crc kubenswrapper[4923]: I1128 11:25:59.450131 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" 
event={"ID":"26a4b167-a30a-4655-80aa-2177fe14784c","Type":"ContainerStarted","Data":"4e605008a9e35bfdf40121092457048041106e5fe3729ef178959cc3b538e626"} Nov 28 11:25:59 crc kubenswrapper[4923]: I1128 11:25:59.474273 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Nov 28 11:26:00 crc kubenswrapper[4923]: I1128 11:26:00.245965 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Nov 28 11:26:00 crc kubenswrapper[4923]: W1128 11:26:00.317672 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb55ac65c_e6ce_46ea_83cc_83afef1efcf9.slice/crio-26004b8f35be71d99b8d8a6e819eb7e8cda95634d497ed28b8775aebce88b6d9 WatchSource:0}: Error finding container 26004b8f35be71d99b8d8a6e819eb7e8cda95634d497ed28b8775aebce88b6d9: Status 404 returned error can't find the container with id 26004b8f35be71d99b8d8a6e819eb7e8cda95634d497ed28b8775aebce88b6d9 Nov 28 11:26:00 crc kubenswrapper[4923]: I1128 11:26:00.493536 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"b55ac65c-e6ce-46ea-83cc-83afef1efcf9","Type":"ContainerStarted","Data":"26004b8f35be71d99b8d8a6e819eb7e8cda95634d497ed28b8775aebce88b6d9"} Nov 28 11:26:00 crc kubenswrapper[4923]: I1128 11:26:00.536373 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 28 11:26:00 crc kubenswrapper[4923]: I1128 11:26:00.537770 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 28 11:26:00 crc kubenswrapper[4923]: I1128 11:26:00.540483 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-v4kqb" Nov 28 11:26:00 crc kubenswrapper[4923]: I1128 11:26:00.540737 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc" Nov 28 11:26:00 crc kubenswrapper[4923]: I1128 11:26:00.540949 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data" Nov 28 11:26:00 crc kubenswrapper[4923]: I1128 11:26:00.541306 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts" Nov 28 11:26:00 crc kubenswrapper[4923]: I1128 11:26:00.568436 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 28 11:26:00 crc kubenswrapper[4923]: I1128 11:26:00.663106 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/1bddc188-1e43-4efd-9228-ac466ce69994-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"1bddc188-1e43-4efd-9228-ac466ce69994\") " pod="openstack/openstack-cell1-galera-0" Nov 28 11:26:00 crc kubenswrapper[4923]: I1128 11:26:00.663418 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/1bddc188-1e43-4efd-9228-ac466ce69994-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"1bddc188-1e43-4efd-9228-ac466ce69994\") " pod="openstack/openstack-cell1-galera-0" Nov 28 11:26:00 crc kubenswrapper[4923]: I1128 11:26:00.663510 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/1bddc188-1e43-4efd-9228-ac466ce69994-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"1bddc188-1e43-4efd-9228-ac466ce69994\") " pod="openstack/openstack-cell1-galera-0" Nov 28 11:26:00 crc kubenswrapper[4923]: I1128 11:26:00.663600 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"openstack-cell1-galera-0\" (UID: \"1bddc188-1e43-4efd-9228-ac466ce69994\") " pod="openstack/openstack-cell1-galera-0" Nov 28 11:26:00 crc kubenswrapper[4923]: I1128 11:26:00.663678 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/1bddc188-1e43-4efd-9228-ac466ce69994-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"1bddc188-1e43-4efd-9228-ac466ce69994\") " pod="openstack/openstack-cell1-galera-0" Nov 28 11:26:00 crc kubenswrapper[4923]: I1128 11:26:00.663756 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/1bddc188-1e43-4efd-9228-ac466ce69994-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"1bddc188-1e43-4efd-9228-ac466ce69994\") " pod="openstack/openstack-cell1-galera-0" Nov 28 11:26:00 crc kubenswrapper[4923]: I1128 11:26:00.663831 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tfrbf\" (UniqueName: \"kubernetes.io/projected/1bddc188-1e43-4efd-9228-ac466ce69994-kube-api-access-tfrbf\") pod \"openstack-cell1-galera-0\" (UID: \"1bddc188-1e43-4efd-9228-ac466ce69994\") " pod="openstack/openstack-cell1-galera-0" Nov 28 11:26:00 crc kubenswrapper[4923]: I1128 11:26:00.663914 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1bddc188-1e43-4efd-9228-ac466ce69994-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"1bddc188-1e43-4efd-9228-ac466ce69994\") " pod="openstack/openstack-cell1-galera-0" Nov 28 11:26:00 crc kubenswrapper[4923]: I1128 11:26:00.667570 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"] Nov 28 11:26:00 crc kubenswrapper[4923]: I1128 11:26:00.680684 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/memcached-0" Nov 28 11:26:00 crc kubenswrapper[4923]: I1128 11:26:00.684563 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-lnqll" Nov 28 11:26:00 crc kubenswrapper[4923]: I1128 11:26:00.684707 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc" Nov 28 11:26:00 crc kubenswrapper[4923]: I1128 11:26:00.692686 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data" Nov 28 11:26:00 crc kubenswrapper[4923]: I1128 11:26:00.699803 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Nov 28 11:26:00 crc kubenswrapper[4923]: I1128 11:26:00.767156 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/1bddc188-1e43-4efd-9228-ac466ce69994-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"1bddc188-1e43-4efd-9228-ac466ce69994\") " pod="openstack/openstack-cell1-galera-0" Nov 28 11:26:00 crc kubenswrapper[4923]: I1128 11:26:00.767202 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/1bddc188-1e43-4efd-9228-ac466ce69994-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"1bddc188-1e43-4efd-9228-ac466ce69994\") " pod="openstack/openstack-cell1-galera-0" Nov 28 11:26:00 crc kubenswrapper[4923]: I1128 11:26:00.767247 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"openstack-cell1-galera-0\" (UID: \"1bddc188-1e43-4efd-9228-ac466ce69994\") " pod="openstack/openstack-cell1-galera-0" Nov 28 11:26:00 crc kubenswrapper[4923]: I1128 11:26:00.767264 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/1bddc188-1e43-4efd-9228-ac466ce69994-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"1bddc188-1e43-4efd-9228-ac466ce69994\") " pod="openstack/openstack-cell1-galera-0" Nov 28 11:26:00 crc kubenswrapper[4923]: I1128 11:26:00.767280 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1bddc188-1e43-4efd-9228-ac466ce69994-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"1bddc188-1e43-4efd-9228-ac466ce69994\") " pod="openstack/openstack-cell1-galera-0" Nov 28 11:26:00 crc kubenswrapper[4923]: I1128 11:26:00.767311 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/1bddc188-1e43-4efd-9228-ac466ce69994-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"1bddc188-1e43-4efd-9228-ac466ce69994\") " pod="openstack/openstack-cell1-galera-0" Nov 28 11:26:00 crc kubenswrapper[4923]: I1128 11:26:00.767326 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tfrbf\" (UniqueName: \"kubernetes.io/projected/1bddc188-1e43-4efd-9228-ac466ce69994-kube-api-access-tfrbf\") pod \"openstack-cell1-galera-0\" (UID: \"1bddc188-1e43-4efd-9228-ac466ce69994\") " pod="openstack/openstack-cell1-galera-0" Nov 28 11:26:00 crc kubenswrapper[4923]: I1128 11:26:00.767352 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1bddc188-1e43-4efd-9228-ac466ce69994-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"1bddc188-1e43-4efd-9228-ac466ce69994\") " pod="openstack/openstack-cell1-galera-0" Nov 28 11:26:00 crc kubenswrapper[4923]: I1128 11:26:00.769808 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1bddc188-1e43-4efd-9228-ac466ce69994-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"1bddc188-1e43-4efd-9228-ac466ce69994\") " pod="openstack/openstack-cell1-galera-0" Nov 28 11:26:00 crc kubenswrapper[4923]: I1128 11:26:00.770148 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/1bddc188-1e43-4efd-9228-ac466ce69994-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"1bddc188-1e43-4efd-9228-ac466ce69994\") " pod="openstack/openstack-cell1-galera-0" Nov 28 11:26:00 crc kubenswrapper[4923]: I1128 11:26:00.770427 4923 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"openstack-cell1-galera-0\" (UID: \"1bddc188-1e43-4efd-9228-ac466ce69994\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/openstack-cell1-galera-0" Nov 28 11:26:00 crc kubenswrapper[4923]: I1128 11:26:00.770563 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/1bddc188-1e43-4efd-9228-ac466ce69994-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"1bddc188-1e43-4efd-9228-ac466ce69994\") " pod="openstack/openstack-cell1-galera-0" Nov 28 11:26:00 crc kubenswrapper[4923]: I1128 11:26:00.791753 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1bddc188-1e43-4efd-9228-ac466ce69994-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"1bddc188-1e43-4efd-9228-ac466ce69994\") " pod="openstack/openstack-cell1-galera-0" Nov 28 11:26:00 crc kubenswrapper[4923]: I1128 11:26:00.794725 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/1bddc188-1e43-4efd-9228-ac466ce69994-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"1bddc188-1e43-4efd-9228-ac466ce69994\") " pod="openstack/openstack-cell1-galera-0" Nov 28 11:26:00 crc kubenswrapper[4923]: I1128 11:26:00.815246 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/1bddc188-1e43-4efd-9228-ac466ce69994-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"1bddc188-1e43-4efd-9228-ac466ce69994\") " pod="openstack/openstack-cell1-galera-0" Nov 28 11:26:00 crc kubenswrapper[4923]: I1128 11:26:00.815911 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tfrbf\" (UniqueName: \"kubernetes.io/projected/1bddc188-1e43-4efd-9228-ac466ce69994-kube-api-access-tfrbf\") pod \"openstack-cell1-galera-0\" (UID: \"1bddc188-1e43-4efd-9228-ac466ce69994\") " pod="openstack/openstack-cell1-galera-0" Nov 28 11:26:00 crc kubenswrapper[4923]: I1128 11:26:00.845502 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"openstack-cell1-galera-0\" 
(UID: \"1bddc188-1e43-4efd-9228-ac466ce69994\") " pod="openstack/openstack-cell1-galera-0" Nov 28 11:26:00 crc kubenswrapper[4923]: I1128 11:26:00.877328 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 28 11:26:00 crc kubenswrapper[4923]: I1128 11:26:00.877712 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gw9sq\" (UniqueName: \"kubernetes.io/projected/c444a612-9839-4189-be4c-955e0f964442-kube-api-access-gw9sq\") pod \"memcached-0\" (UID: \"c444a612-9839-4189-be4c-955e0f964442\") " pod="openstack/memcached-0" Nov 28 11:26:00 crc kubenswrapper[4923]: I1128 11:26:00.877784 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/c444a612-9839-4189-be4c-955e0f964442-kolla-config\") pod \"memcached-0\" (UID: \"c444a612-9839-4189-be4c-955e0f964442\") " pod="openstack/memcached-0" Nov 28 11:26:00 crc kubenswrapper[4923]: I1128 11:26:00.877803 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c444a612-9839-4189-be4c-955e0f964442-combined-ca-bundle\") pod \"memcached-0\" (UID: \"c444a612-9839-4189-be4c-955e0f964442\") " pod="openstack/memcached-0" Nov 28 11:26:00 crc kubenswrapper[4923]: I1128 11:26:00.877826 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/c444a612-9839-4189-be4c-955e0f964442-memcached-tls-certs\") pod \"memcached-0\" (UID: \"c444a612-9839-4189-be4c-955e0f964442\") " pod="openstack/memcached-0" Nov 28 11:26:00 crc kubenswrapper[4923]: I1128 11:26:00.877846 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c444a612-9839-4189-be4c-955e0f964442-config-data\") pod \"memcached-0\" (UID: \"c444a612-9839-4189-be4c-955e0f964442\") " pod="openstack/memcached-0" Nov 28 11:26:00 crc kubenswrapper[4923]: I1128 11:26:00.978814 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/c444a612-9839-4189-be4c-955e0f964442-kolla-config\") pod \"memcached-0\" (UID: \"c444a612-9839-4189-be4c-955e0f964442\") " pod="openstack/memcached-0" Nov 28 11:26:00 crc kubenswrapper[4923]: I1128 11:26:00.978853 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c444a612-9839-4189-be4c-955e0f964442-combined-ca-bundle\") pod \"memcached-0\" (UID: \"c444a612-9839-4189-be4c-955e0f964442\") " pod="openstack/memcached-0" Nov 28 11:26:00 crc kubenswrapper[4923]: I1128 11:26:00.978883 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/c444a612-9839-4189-be4c-955e0f964442-memcached-tls-certs\") pod \"memcached-0\" (UID: \"c444a612-9839-4189-be4c-955e0f964442\") " pod="openstack/memcached-0" Nov 28 11:26:00 crc kubenswrapper[4923]: I1128 11:26:00.978904 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c444a612-9839-4189-be4c-955e0f964442-config-data\") pod \"memcached-0\" (UID: \"c444a612-9839-4189-be4c-955e0f964442\") " 
pod="openstack/memcached-0" Nov 28 11:26:00 crc kubenswrapper[4923]: I1128 11:26:00.978986 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gw9sq\" (UniqueName: \"kubernetes.io/projected/c444a612-9839-4189-be4c-955e0f964442-kube-api-access-gw9sq\") pod \"memcached-0\" (UID: \"c444a612-9839-4189-be4c-955e0f964442\") " pod="openstack/memcached-0" Nov 28 11:26:00 crc kubenswrapper[4923]: I1128 11:26:00.981252 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/c444a612-9839-4189-be4c-955e0f964442-kolla-config\") pod \"memcached-0\" (UID: \"c444a612-9839-4189-be4c-955e0f964442\") " pod="openstack/memcached-0" Nov 28 11:26:00 crc kubenswrapper[4923]: I1128 11:26:00.981312 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c444a612-9839-4189-be4c-955e0f964442-config-data\") pod \"memcached-0\" (UID: \"c444a612-9839-4189-be4c-955e0f964442\") " pod="openstack/memcached-0" Nov 28 11:26:00 crc kubenswrapper[4923]: I1128 11:26:00.983489 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/c444a612-9839-4189-be4c-955e0f964442-memcached-tls-certs\") pod \"memcached-0\" (UID: \"c444a612-9839-4189-be4c-955e0f964442\") " pod="openstack/memcached-0" Nov 28 11:26:00 crc kubenswrapper[4923]: I1128 11:26:00.983772 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c444a612-9839-4189-be4c-955e0f964442-combined-ca-bundle\") pod \"memcached-0\" (UID: \"c444a612-9839-4189-be4c-955e0f964442\") " pod="openstack/memcached-0" Nov 28 11:26:00 crc kubenswrapper[4923]: I1128 11:26:00.993624 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gw9sq\" (UniqueName: \"kubernetes.io/projected/c444a612-9839-4189-be4c-955e0f964442-kube-api-access-gw9sq\") pod \"memcached-0\" (UID: \"c444a612-9839-4189-be4c-955e0f964442\") " pod="openstack/memcached-0" Nov 28 11:26:01 crc kubenswrapper[4923]: I1128 11:26:01.013074 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Nov 28 11:26:01 crc kubenswrapper[4923]: I1128 11:26:01.550706 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 28 11:26:01 crc kubenswrapper[4923]: I1128 11:26:01.704794 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Nov 28 11:26:01 crc kubenswrapper[4923]: W1128 11:26:01.731379 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc444a612_9839_4189_be4c_955e0f964442.slice/crio-52d4043d64fbe9814c9541ebd54ab8da972af1e3fdfc4762b2fa1d9567729c13 WatchSource:0}: Error finding container 52d4043d64fbe9814c9541ebd54ab8da972af1e3fdfc4762b2fa1d9567729c13: Status 404 returned error can't find the container with id 52d4043d64fbe9814c9541ebd54ab8da972af1e3fdfc4762b2fa1d9567729c13 Nov 28 11:26:02 crc kubenswrapper[4923]: I1128 11:26:02.550231 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 11:26:02 crc kubenswrapper[4923]: I1128 11:26:02.551222 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 28 11:26:02 crc kubenswrapper[4923]: I1128 11:26:02.554217 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-lcpnb" Nov 28 11:26:02 crc kubenswrapper[4923]: I1128 11:26:02.567509 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 11:26:02 crc kubenswrapper[4923]: I1128 11:26:02.628600 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nwddd\" (UniqueName: \"kubernetes.io/projected/21f79010-cee5-4d8c-8af5-cab32d6b0031-kube-api-access-nwddd\") pod \"kube-state-metrics-0\" (UID: \"21f79010-cee5-4d8c-8af5-cab32d6b0031\") " pod="openstack/kube-state-metrics-0" Nov 28 11:26:02 crc kubenswrapper[4923]: I1128 11:26:02.667396 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"c444a612-9839-4189-be4c-955e0f964442","Type":"ContainerStarted","Data":"52d4043d64fbe9814c9541ebd54ab8da972af1e3fdfc4762b2fa1d9567729c13"} Nov 28 11:26:02 crc kubenswrapper[4923]: I1128 11:26:02.698261 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"1bddc188-1e43-4efd-9228-ac466ce69994","Type":"ContainerStarted","Data":"014ff341605306c8f7ed3e7de65349db5a6d41f636130813a2485a691a711927"} Nov 28 11:26:02 crc kubenswrapper[4923]: I1128 11:26:02.730266 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nwddd\" (UniqueName: \"kubernetes.io/projected/21f79010-cee5-4d8c-8af5-cab32d6b0031-kube-api-access-nwddd\") pod \"kube-state-metrics-0\" (UID: \"21f79010-cee5-4d8c-8af5-cab32d6b0031\") " pod="openstack/kube-state-metrics-0" Nov 28 11:26:02 crc kubenswrapper[4923]: I1128 11:26:02.766818 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nwddd\" (UniqueName: \"kubernetes.io/projected/21f79010-cee5-4d8c-8af5-cab32d6b0031-kube-api-access-nwddd\") pod \"kube-state-metrics-0\" (UID: \"21f79010-cee5-4d8c-8af5-cab32d6b0031\") " pod="openstack/kube-state-metrics-0" Nov 28 11:26:02 crc kubenswrapper[4923]: I1128 11:26:02.931715 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 28 11:26:03 crc kubenswrapper[4923]: I1128 11:26:03.557129 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 11:26:03 crc kubenswrapper[4923]: I1128 11:26:03.753670 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"21f79010-cee5-4d8c-8af5-cab32d6b0031","Type":"ContainerStarted","Data":"b4f8cbfe2299b472630863ab3fbe97b790841988c7e87c746d8542dab415192c"} Nov 28 11:26:05 crc kubenswrapper[4923]: I1128 11:26:05.980171 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-67x9j"] Nov 28 11:26:05 crc kubenswrapper[4923]: I1128 11:26:05.981418 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-67x9j" Nov 28 11:26:05 crc kubenswrapper[4923]: I1128 11:26:05.983817 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-zjtsg" Nov 28 11:26:05 crc kubenswrapper[4923]: I1128 11:26:05.984150 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs" Nov 28 11:26:05 crc kubenswrapper[4923]: I1128 11:26:05.984208 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts" Nov 28 11:26:05 crc kubenswrapper[4923]: I1128 11:26:05.993386 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-6d9vf"] Nov 28 11:26:06 crc kubenswrapper[4923]: I1128 11:26:06.000574 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-67x9j"] Nov 28 11:26:06 crc kubenswrapper[4923]: I1128 11:26:06.000675 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-6d9vf" Nov 28 11:26:06 crc kubenswrapper[4923]: I1128 11:26:06.022099 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-6d9vf"] Nov 28 11:26:06 crc kubenswrapper[4923]: I1128 11:26:06.103149 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/92ee0076-8a34-461d-8af0-1e0739e91266-var-lib\") pod \"ovn-controller-ovs-6d9vf\" (UID: \"92ee0076-8a34-461d-8af0-1e0739e91266\") " pod="openstack/ovn-controller-ovs-6d9vf" Nov 28 11:26:06 crc kubenswrapper[4923]: I1128 11:26:06.103227 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/92ee0076-8a34-461d-8af0-1e0739e91266-etc-ovs\") pod \"ovn-controller-ovs-6d9vf\" (UID: \"92ee0076-8a34-461d-8af0-1e0739e91266\") " pod="openstack/ovn-controller-ovs-6d9vf" Nov 28 11:26:06 crc kubenswrapper[4923]: I1128 11:26:06.103251 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/817453fc-6da1-4525-85bf-0d8b22848ff1-ovn-controller-tls-certs\") pod \"ovn-controller-67x9j\" (UID: \"817453fc-6da1-4525-85bf-0d8b22848ff1\") " pod="openstack/ovn-controller-67x9j" Nov 28 11:26:06 crc kubenswrapper[4923]: I1128 11:26:06.103297 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xhm4w\" (UniqueName: \"kubernetes.io/projected/92ee0076-8a34-461d-8af0-1e0739e91266-kube-api-access-xhm4w\") pod \"ovn-controller-ovs-6d9vf\" (UID: \"92ee0076-8a34-461d-8af0-1e0739e91266\") " pod="openstack/ovn-controller-ovs-6d9vf" Nov 28 11:26:06 crc kubenswrapper[4923]: I1128 11:26:06.103317 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/817453fc-6da1-4525-85bf-0d8b22848ff1-scripts\") pod \"ovn-controller-67x9j\" (UID: \"817453fc-6da1-4525-85bf-0d8b22848ff1\") " pod="openstack/ovn-controller-67x9j" Nov 28 11:26:06 crc kubenswrapper[4923]: I1128 11:26:06.103371 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/817453fc-6da1-4525-85bf-0d8b22848ff1-var-log-ovn\") pod \"ovn-controller-67x9j\" (UID: 
\"817453fc-6da1-4525-85bf-0d8b22848ff1\") " pod="openstack/ovn-controller-67x9j" Nov 28 11:26:06 crc kubenswrapper[4923]: I1128 11:26:06.103392 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/92ee0076-8a34-461d-8af0-1e0739e91266-var-log\") pod \"ovn-controller-ovs-6d9vf\" (UID: \"92ee0076-8a34-461d-8af0-1e0739e91266\") " pod="openstack/ovn-controller-ovs-6d9vf" Nov 28 11:26:06 crc kubenswrapper[4923]: I1128 11:26:06.103409 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g8jhs\" (UniqueName: \"kubernetes.io/projected/817453fc-6da1-4525-85bf-0d8b22848ff1-kube-api-access-g8jhs\") pod \"ovn-controller-67x9j\" (UID: \"817453fc-6da1-4525-85bf-0d8b22848ff1\") " pod="openstack/ovn-controller-67x9j" Nov 28 11:26:06 crc kubenswrapper[4923]: I1128 11:26:06.103463 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/92ee0076-8a34-461d-8af0-1e0739e91266-scripts\") pod \"ovn-controller-ovs-6d9vf\" (UID: \"92ee0076-8a34-461d-8af0-1e0739e91266\") " pod="openstack/ovn-controller-ovs-6d9vf" Nov 28 11:26:06 crc kubenswrapper[4923]: I1128 11:26:06.103486 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/817453fc-6da1-4525-85bf-0d8b22848ff1-var-run\") pod \"ovn-controller-67x9j\" (UID: \"817453fc-6da1-4525-85bf-0d8b22848ff1\") " pod="openstack/ovn-controller-67x9j" Nov 28 11:26:06 crc kubenswrapper[4923]: I1128 11:26:06.103533 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/817453fc-6da1-4525-85bf-0d8b22848ff1-var-run-ovn\") pod \"ovn-controller-67x9j\" (UID: \"817453fc-6da1-4525-85bf-0d8b22848ff1\") " pod="openstack/ovn-controller-67x9j" Nov 28 11:26:06 crc kubenswrapper[4923]: I1128 11:26:06.103547 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/817453fc-6da1-4525-85bf-0d8b22848ff1-combined-ca-bundle\") pod \"ovn-controller-67x9j\" (UID: \"817453fc-6da1-4525-85bf-0d8b22848ff1\") " pod="openstack/ovn-controller-67x9j" Nov 28 11:26:06 crc kubenswrapper[4923]: I1128 11:26:06.103563 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/92ee0076-8a34-461d-8af0-1e0739e91266-var-run\") pod \"ovn-controller-ovs-6d9vf\" (UID: \"92ee0076-8a34-461d-8af0-1e0739e91266\") " pod="openstack/ovn-controller-ovs-6d9vf" Nov 28 11:26:06 crc kubenswrapper[4923]: I1128 11:26:06.205046 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/92ee0076-8a34-461d-8af0-1e0739e91266-var-lib\") pod \"ovn-controller-ovs-6d9vf\" (UID: \"92ee0076-8a34-461d-8af0-1e0739e91266\") " pod="openstack/ovn-controller-ovs-6d9vf" Nov 28 11:26:06 crc kubenswrapper[4923]: I1128 11:26:06.205106 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/92ee0076-8a34-461d-8af0-1e0739e91266-etc-ovs\") pod \"ovn-controller-ovs-6d9vf\" (UID: \"92ee0076-8a34-461d-8af0-1e0739e91266\") " pod="openstack/ovn-controller-ovs-6d9vf" Nov 28 
11:26:06 crc kubenswrapper[4923]: I1128 11:26:06.205129 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/817453fc-6da1-4525-85bf-0d8b22848ff1-ovn-controller-tls-certs\") pod \"ovn-controller-67x9j\" (UID: \"817453fc-6da1-4525-85bf-0d8b22848ff1\") " pod="openstack/ovn-controller-67x9j" Nov 28 11:26:06 crc kubenswrapper[4923]: I1128 11:26:06.205154 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xhm4w\" (UniqueName: \"kubernetes.io/projected/92ee0076-8a34-461d-8af0-1e0739e91266-kube-api-access-xhm4w\") pod \"ovn-controller-ovs-6d9vf\" (UID: \"92ee0076-8a34-461d-8af0-1e0739e91266\") " pod="openstack/ovn-controller-ovs-6d9vf" Nov 28 11:26:06 crc kubenswrapper[4923]: I1128 11:26:06.205177 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/817453fc-6da1-4525-85bf-0d8b22848ff1-scripts\") pod \"ovn-controller-67x9j\" (UID: \"817453fc-6da1-4525-85bf-0d8b22848ff1\") " pod="openstack/ovn-controller-67x9j" Nov 28 11:26:06 crc kubenswrapper[4923]: I1128 11:26:06.205196 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/817453fc-6da1-4525-85bf-0d8b22848ff1-var-log-ovn\") pod \"ovn-controller-67x9j\" (UID: \"817453fc-6da1-4525-85bf-0d8b22848ff1\") " pod="openstack/ovn-controller-67x9j" Nov 28 11:26:06 crc kubenswrapper[4923]: I1128 11:26:06.205212 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/92ee0076-8a34-461d-8af0-1e0739e91266-var-log\") pod \"ovn-controller-ovs-6d9vf\" (UID: \"92ee0076-8a34-461d-8af0-1e0739e91266\") " pod="openstack/ovn-controller-ovs-6d9vf" Nov 28 11:26:06 crc kubenswrapper[4923]: I1128 11:26:06.205229 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g8jhs\" (UniqueName: \"kubernetes.io/projected/817453fc-6da1-4525-85bf-0d8b22848ff1-kube-api-access-g8jhs\") pod \"ovn-controller-67x9j\" (UID: \"817453fc-6da1-4525-85bf-0d8b22848ff1\") " pod="openstack/ovn-controller-67x9j" Nov 28 11:26:06 crc kubenswrapper[4923]: I1128 11:26:06.205250 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/92ee0076-8a34-461d-8af0-1e0739e91266-scripts\") pod \"ovn-controller-ovs-6d9vf\" (UID: \"92ee0076-8a34-461d-8af0-1e0739e91266\") " pod="openstack/ovn-controller-ovs-6d9vf" Nov 28 11:26:06 crc kubenswrapper[4923]: I1128 11:26:06.205273 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/817453fc-6da1-4525-85bf-0d8b22848ff1-var-run\") pod \"ovn-controller-67x9j\" (UID: \"817453fc-6da1-4525-85bf-0d8b22848ff1\") " pod="openstack/ovn-controller-67x9j" Nov 28 11:26:06 crc kubenswrapper[4923]: I1128 11:26:06.205294 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/817453fc-6da1-4525-85bf-0d8b22848ff1-var-run-ovn\") pod \"ovn-controller-67x9j\" (UID: \"817453fc-6da1-4525-85bf-0d8b22848ff1\") " pod="openstack/ovn-controller-67x9j" Nov 28 11:26:06 crc kubenswrapper[4923]: I1128 11:26:06.205307 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/817453fc-6da1-4525-85bf-0d8b22848ff1-combined-ca-bundle\") pod \"ovn-controller-67x9j\" (UID: \"817453fc-6da1-4525-85bf-0d8b22848ff1\") " pod="openstack/ovn-controller-67x9j" Nov 28 11:26:06 crc kubenswrapper[4923]: I1128 11:26:06.205323 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/92ee0076-8a34-461d-8af0-1e0739e91266-var-run\") pod \"ovn-controller-ovs-6d9vf\" (UID: \"92ee0076-8a34-461d-8af0-1e0739e91266\") " pod="openstack/ovn-controller-ovs-6d9vf" Nov 28 11:26:06 crc kubenswrapper[4923]: I1128 11:26:06.205726 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/817453fc-6da1-4525-85bf-0d8b22848ff1-var-run\") pod \"ovn-controller-67x9j\" (UID: \"817453fc-6da1-4525-85bf-0d8b22848ff1\") " pod="openstack/ovn-controller-67x9j" Nov 28 11:26:06 crc kubenswrapper[4923]: I1128 11:26:06.205803 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/817453fc-6da1-4525-85bf-0d8b22848ff1-var-run-ovn\") pod \"ovn-controller-67x9j\" (UID: \"817453fc-6da1-4525-85bf-0d8b22848ff1\") " pod="openstack/ovn-controller-67x9j" Nov 28 11:26:06 crc kubenswrapper[4923]: I1128 11:26:06.205824 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/92ee0076-8a34-461d-8af0-1e0739e91266-etc-ovs\") pod \"ovn-controller-ovs-6d9vf\" (UID: \"92ee0076-8a34-461d-8af0-1e0739e91266\") " pod="openstack/ovn-controller-ovs-6d9vf" Nov 28 11:26:06 crc kubenswrapper[4923]: I1128 11:26:06.205909 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/92ee0076-8a34-461d-8af0-1e0739e91266-var-run\") pod \"ovn-controller-ovs-6d9vf\" (UID: \"92ee0076-8a34-461d-8af0-1e0739e91266\") " pod="openstack/ovn-controller-ovs-6d9vf" Nov 28 11:26:06 crc kubenswrapper[4923]: I1128 11:26:06.206007 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/817453fc-6da1-4525-85bf-0d8b22848ff1-var-log-ovn\") pod \"ovn-controller-67x9j\" (UID: \"817453fc-6da1-4525-85bf-0d8b22848ff1\") " pod="openstack/ovn-controller-67x9j" Nov 28 11:26:06 crc kubenswrapper[4923]: I1128 11:26:06.206099 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/92ee0076-8a34-461d-8af0-1e0739e91266-var-log\") pod \"ovn-controller-ovs-6d9vf\" (UID: \"92ee0076-8a34-461d-8af0-1e0739e91266\") " pod="openstack/ovn-controller-ovs-6d9vf" Nov 28 11:26:06 crc kubenswrapper[4923]: I1128 11:26:06.206790 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/92ee0076-8a34-461d-8af0-1e0739e91266-var-lib\") pod \"ovn-controller-ovs-6d9vf\" (UID: \"92ee0076-8a34-461d-8af0-1e0739e91266\") " pod="openstack/ovn-controller-ovs-6d9vf" Nov 28 11:26:06 crc kubenswrapper[4923]: I1128 11:26:06.207953 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/92ee0076-8a34-461d-8af0-1e0739e91266-scripts\") pod \"ovn-controller-ovs-6d9vf\" (UID: \"92ee0076-8a34-461d-8af0-1e0739e91266\") " pod="openstack/ovn-controller-ovs-6d9vf" Nov 28 11:26:06 crc kubenswrapper[4923]: I1128 11:26:06.209236 4923 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/817453fc-6da1-4525-85bf-0d8b22848ff1-scripts\") pod \"ovn-controller-67x9j\" (UID: \"817453fc-6da1-4525-85bf-0d8b22848ff1\") " pod="openstack/ovn-controller-67x9j" Nov 28 11:26:06 crc kubenswrapper[4923]: I1128 11:26:06.212381 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/817453fc-6da1-4525-85bf-0d8b22848ff1-combined-ca-bundle\") pod \"ovn-controller-67x9j\" (UID: \"817453fc-6da1-4525-85bf-0d8b22848ff1\") " pod="openstack/ovn-controller-67x9j" Nov 28 11:26:06 crc kubenswrapper[4923]: I1128 11:26:06.214475 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/817453fc-6da1-4525-85bf-0d8b22848ff1-ovn-controller-tls-certs\") pod \"ovn-controller-67x9j\" (UID: \"817453fc-6da1-4525-85bf-0d8b22848ff1\") " pod="openstack/ovn-controller-67x9j" Nov 28 11:26:06 crc kubenswrapper[4923]: I1128 11:26:06.220328 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xhm4w\" (UniqueName: \"kubernetes.io/projected/92ee0076-8a34-461d-8af0-1e0739e91266-kube-api-access-xhm4w\") pod \"ovn-controller-ovs-6d9vf\" (UID: \"92ee0076-8a34-461d-8af0-1e0739e91266\") " pod="openstack/ovn-controller-ovs-6d9vf" Nov 28 11:26:06 crc kubenswrapper[4923]: I1128 11:26:06.230060 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g8jhs\" (UniqueName: \"kubernetes.io/projected/817453fc-6da1-4525-85bf-0d8b22848ff1-kube-api-access-g8jhs\") pod \"ovn-controller-67x9j\" (UID: \"817453fc-6da1-4525-85bf-0d8b22848ff1\") " pod="openstack/ovn-controller-67x9j" Nov 28 11:26:06 crc kubenswrapper[4923]: I1128 11:26:06.360311 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-67x9j" Nov 28 11:26:06 crc kubenswrapper[4923]: I1128 11:26:06.374819 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-6d9vf" Nov 28 11:26:08 crc kubenswrapper[4923]: I1128 11:26:08.631860 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 28 11:26:08 crc kubenswrapper[4923]: I1128 11:26:08.633416 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 28 11:26:08 crc kubenswrapper[4923]: I1128 11:26:08.640289 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 28 11:26:08 crc kubenswrapper[4923]: I1128 11:26:08.666118 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Nov 28 11:26:08 crc kubenswrapper[4923]: I1128 11:26:08.666489 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics" Nov 28 11:26:08 crc kubenswrapper[4923]: I1128 11:26:08.666848 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs" Nov 28 11:26:08 crc kubenswrapper[4923]: I1128 11:26:08.666995 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-f95rz" Nov 28 11:26:08 crc kubenswrapper[4923]: I1128 11:26:08.670275 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Nov 28 11:26:08 crc kubenswrapper[4923]: I1128 11:26:08.767823 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/87cb4282-026f-4c9b-8854-c410a2751727-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"87cb4282-026f-4c9b-8854-c410a2751727\") " pod="openstack/ovsdbserver-nb-0" Nov 28 11:26:08 crc kubenswrapper[4923]: I1128 11:26:08.768225 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/87cb4282-026f-4c9b-8854-c410a2751727-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"87cb4282-026f-4c9b-8854-c410a2751727\") " pod="openstack/ovsdbserver-nb-0" Nov 28 11:26:08 crc kubenswrapper[4923]: I1128 11:26:08.768261 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5xtxs\" (UniqueName: \"kubernetes.io/projected/87cb4282-026f-4c9b-8854-c410a2751727-kube-api-access-5xtxs\") pod \"ovsdbserver-nb-0\" (UID: \"87cb4282-026f-4c9b-8854-c410a2751727\") " pod="openstack/ovsdbserver-nb-0" Nov 28 11:26:08 crc kubenswrapper[4923]: I1128 11:26:08.768308 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/87cb4282-026f-4c9b-8854-c410a2751727-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"87cb4282-026f-4c9b-8854-c410a2751727\") " pod="openstack/ovsdbserver-nb-0" Nov 28 11:26:08 crc kubenswrapper[4923]: I1128 11:26:08.768347 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/87cb4282-026f-4c9b-8854-c410a2751727-config\") pod \"ovsdbserver-nb-0\" (UID: \"87cb4282-026f-4c9b-8854-c410a2751727\") " pod="openstack/ovsdbserver-nb-0" Nov 28 11:26:08 crc kubenswrapper[4923]: I1128 11:26:08.768367 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/87cb4282-026f-4c9b-8854-c410a2751727-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"87cb4282-026f-4c9b-8854-c410a2751727\") " pod="openstack/ovsdbserver-nb-0" Nov 28 11:26:08 crc kubenswrapper[4923]: I1128 11:26:08.768383 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"ovsdbserver-nb-0\" (UID: \"87cb4282-026f-4c9b-8854-c410a2751727\") " pod="openstack/ovsdbserver-nb-0" Nov 28 11:26:08 crc kubenswrapper[4923]: I1128 11:26:08.768425 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/87cb4282-026f-4c9b-8854-c410a2751727-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"87cb4282-026f-4c9b-8854-c410a2751727\") " pod="openstack/ovsdbserver-nb-0" Nov 28 11:26:08 crc kubenswrapper[4923]: I1128 11:26:08.869823 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/87cb4282-026f-4c9b-8854-c410a2751727-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"87cb4282-026f-4c9b-8854-c410a2751727\") " pod="openstack/ovsdbserver-nb-0" Nov 28 11:26:08 crc kubenswrapper[4923]: I1128 11:26:08.869927 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/87cb4282-026f-4c9b-8854-c410a2751727-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"87cb4282-026f-4c9b-8854-c410a2751727\") " pod="openstack/ovsdbserver-nb-0" Nov 28 11:26:08 crc kubenswrapper[4923]: I1128 11:26:08.870689 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/87cb4282-026f-4c9b-8854-c410a2751727-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"87cb4282-026f-4c9b-8854-c410a2751727\") " pod="openstack/ovsdbserver-nb-0" Nov 28 11:26:08 crc kubenswrapper[4923]: I1128 11:26:08.870721 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5xtxs\" (UniqueName: \"kubernetes.io/projected/87cb4282-026f-4c9b-8854-c410a2751727-kube-api-access-5xtxs\") pod \"ovsdbserver-nb-0\" (UID: \"87cb4282-026f-4c9b-8854-c410a2751727\") " pod="openstack/ovsdbserver-nb-0" Nov 28 11:26:08 crc kubenswrapper[4923]: I1128 11:26:08.871170 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/87cb4282-026f-4c9b-8854-c410a2751727-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"87cb4282-026f-4c9b-8854-c410a2751727\") " pod="openstack/ovsdbserver-nb-0" Nov 28 11:26:08 crc kubenswrapper[4923]: I1128 11:26:08.872736 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/87cb4282-026f-4c9b-8854-c410a2751727-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"87cb4282-026f-4c9b-8854-c410a2751727\") " pod="openstack/ovsdbserver-nb-0" Nov 28 11:26:08 crc kubenswrapper[4923]: I1128 11:26:08.872884 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/87cb4282-026f-4c9b-8854-c410a2751727-config\") pod \"ovsdbserver-nb-0\" (UID: \"87cb4282-026f-4c9b-8854-c410a2751727\") " pod="openstack/ovsdbserver-nb-0" Nov 28 11:26:08 crc kubenswrapper[4923]: I1128 11:26:08.872911 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/87cb4282-026f-4c9b-8854-c410a2751727-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"87cb4282-026f-4c9b-8854-c410a2751727\") " pod="openstack/ovsdbserver-nb-0" Nov 28 11:26:08 crc kubenswrapper[4923]: 
I1128 11:26:08.873017 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"ovsdbserver-nb-0\" (UID: \"87cb4282-026f-4c9b-8854-c410a2751727\") " pod="openstack/ovsdbserver-nb-0" Nov 28 11:26:08 crc kubenswrapper[4923]: I1128 11:26:08.875704 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/87cb4282-026f-4c9b-8854-c410a2751727-config\") pod \"ovsdbserver-nb-0\" (UID: \"87cb4282-026f-4c9b-8854-c410a2751727\") " pod="openstack/ovsdbserver-nb-0" Nov 28 11:26:08 crc kubenswrapper[4923]: I1128 11:26:08.875988 4923 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"ovsdbserver-nb-0\" (UID: \"87cb4282-026f-4c9b-8854-c410a2751727\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/ovsdbserver-nb-0" Nov 28 11:26:08 crc kubenswrapper[4923]: I1128 11:26:08.877250 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/87cb4282-026f-4c9b-8854-c410a2751727-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"87cb4282-026f-4c9b-8854-c410a2751727\") " pod="openstack/ovsdbserver-nb-0" Nov 28 11:26:08 crc kubenswrapper[4923]: I1128 11:26:08.885149 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/87cb4282-026f-4c9b-8854-c410a2751727-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"87cb4282-026f-4c9b-8854-c410a2751727\") " pod="openstack/ovsdbserver-nb-0" Nov 28 11:26:08 crc kubenswrapper[4923]: I1128 11:26:08.887362 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/87cb4282-026f-4c9b-8854-c410a2751727-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"87cb4282-026f-4c9b-8854-c410a2751727\") " pod="openstack/ovsdbserver-nb-0" Nov 28 11:26:08 crc kubenswrapper[4923]: I1128 11:26:08.918087 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/87cb4282-026f-4c9b-8854-c410a2751727-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"87cb4282-026f-4c9b-8854-c410a2751727\") " pod="openstack/ovsdbserver-nb-0" Nov 28 11:26:08 crc kubenswrapper[4923]: I1128 11:26:08.934459 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5xtxs\" (UniqueName: \"kubernetes.io/projected/87cb4282-026f-4c9b-8854-c410a2751727-kube-api-access-5xtxs\") pod \"ovsdbserver-nb-0\" (UID: \"87cb4282-026f-4c9b-8854-c410a2751727\") " pod="openstack/ovsdbserver-nb-0" Nov 28 11:26:08 crc kubenswrapper[4923]: I1128 11:26:08.942582 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"ovsdbserver-nb-0\" (UID: \"87cb4282-026f-4c9b-8854-c410a2751727\") " pod="openstack/ovsdbserver-nb-0" Nov 28 11:26:09 crc kubenswrapper[4923]: I1128 11:26:09.000413 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 28 11:26:09 crc kubenswrapper[4923]: I1128 11:26:09.922219 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 28 11:26:09 crc kubenswrapper[4923]: I1128 11:26:09.923391 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 28 11:26:09 crc kubenswrapper[4923]: I1128 11:26:09.930450 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Nov 28 11:26:09 crc kubenswrapper[4923]: I1128 11:26:09.930833 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-f69hg" Nov 28 11:26:09 crc kubenswrapper[4923]: I1128 11:26:09.930901 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Nov 28 11:26:09 crc kubenswrapper[4923]: I1128 11:26:09.930964 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs" Nov 28 11:26:09 crc kubenswrapper[4923]: I1128 11:26:09.936738 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 28 11:26:10 crc kubenswrapper[4923]: I1128 11:26:10.090234 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/ed74870d-915b-4790-9e30-02757e0c4e57-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"ed74870d-915b-4790-9e30-02757e0c4e57\") " pod="openstack/ovsdbserver-sb-0" Nov 28 11:26:10 crc kubenswrapper[4923]: I1128 11:26:10.090285 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/ed74870d-915b-4790-9e30-02757e0c4e57-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"ed74870d-915b-4790-9e30-02757e0c4e57\") " pod="openstack/ovsdbserver-sb-0" Nov 28 11:26:10 crc kubenswrapper[4923]: I1128 11:26:10.090323 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ed74870d-915b-4790-9e30-02757e0c4e57-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"ed74870d-915b-4790-9e30-02757e0c4e57\") " pod="openstack/ovsdbserver-sb-0" Nov 28 11:26:10 crc kubenswrapper[4923]: I1128 11:26:10.090406 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"ovsdbserver-sb-0\" (UID: \"ed74870d-915b-4790-9e30-02757e0c4e57\") " pod="openstack/ovsdbserver-sb-0" Nov 28 11:26:10 crc kubenswrapper[4923]: I1128 11:26:10.090425 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/ed74870d-915b-4790-9e30-02757e0c4e57-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"ed74870d-915b-4790-9e30-02757e0c4e57\") " pod="openstack/ovsdbserver-sb-0" Nov 28 11:26:10 crc kubenswrapper[4923]: I1128 11:26:10.090459 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ed74870d-915b-4790-9e30-02757e0c4e57-config\") pod \"ovsdbserver-sb-0\" (UID: \"ed74870d-915b-4790-9e30-02757e0c4e57\") " pod="openstack/ovsdbserver-sb-0" Nov 28 11:26:10 crc kubenswrapper[4923]: 
I1128 11:26:10.090486 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed74870d-915b-4790-9e30-02757e0c4e57-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"ed74870d-915b-4790-9e30-02757e0c4e57\") " pod="openstack/ovsdbserver-sb-0" Nov 28 11:26:10 crc kubenswrapper[4923]: I1128 11:26:10.090508 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5p6dk\" (UniqueName: \"kubernetes.io/projected/ed74870d-915b-4790-9e30-02757e0c4e57-kube-api-access-5p6dk\") pod \"ovsdbserver-sb-0\" (UID: \"ed74870d-915b-4790-9e30-02757e0c4e57\") " pod="openstack/ovsdbserver-sb-0" Nov 28 11:26:10 crc kubenswrapper[4923]: I1128 11:26:10.191674 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"ovsdbserver-sb-0\" (UID: \"ed74870d-915b-4790-9e30-02757e0c4e57\") " pod="openstack/ovsdbserver-sb-0" Nov 28 11:26:10 crc kubenswrapper[4923]: I1128 11:26:10.191719 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/ed74870d-915b-4790-9e30-02757e0c4e57-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"ed74870d-915b-4790-9e30-02757e0c4e57\") " pod="openstack/ovsdbserver-sb-0" Nov 28 11:26:10 crc kubenswrapper[4923]: I1128 11:26:10.191742 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ed74870d-915b-4790-9e30-02757e0c4e57-config\") pod \"ovsdbserver-sb-0\" (UID: \"ed74870d-915b-4790-9e30-02757e0c4e57\") " pod="openstack/ovsdbserver-sb-0" Nov 28 11:26:10 crc kubenswrapper[4923]: I1128 11:26:10.191772 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed74870d-915b-4790-9e30-02757e0c4e57-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"ed74870d-915b-4790-9e30-02757e0c4e57\") " pod="openstack/ovsdbserver-sb-0" Nov 28 11:26:10 crc kubenswrapper[4923]: I1128 11:26:10.191791 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5p6dk\" (UniqueName: \"kubernetes.io/projected/ed74870d-915b-4790-9e30-02757e0c4e57-kube-api-access-5p6dk\") pod \"ovsdbserver-sb-0\" (UID: \"ed74870d-915b-4790-9e30-02757e0c4e57\") " pod="openstack/ovsdbserver-sb-0" Nov 28 11:26:10 crc kubenswrapper[4923]: I1128 11:26:10.191826 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/ed74870d-915b-4790-9e30-02757e0c4e57-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"ed74870d-915b-4790-9e30-02757e0c4e57\") " pod="openstack/ovsdbserver-sb-0" Nov 28 11:26:10 crc kubenswrapper[4923]: I1128 11:26:10.191846 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/ed74870d-915b-4790-9e30-02757e0c4e57-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"ed74870d-915b-4790-9e30-02757e0c4e57\") " pod="openstack/ovsdbserver-sb-0" Nov 28 11:26:10 crc kubenswrapper[4923]: I1128 11:26:10.191872 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ed74870d-915b-4790-9e30-02757e0c4e57-scripts\") pod 
\"ovsdbserver-sb-0\" (UID: \"ed74870d-915b-4790-9e30-02757e0c4e57\") " pod="openstack/ovsdbserver-sb-0" Nov 28 11:26:10 crc kubenswrapper[4923]: I1128 11:26:10.192495 4923 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"ovsdbserver-sb-0\" (UID: \"ed74870d-915b-4790-9e30-02757e0c4e57\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/ovsdbserver-sb-0" Nov 28 11:26:10 crc kubenswrapper[4923]: I1128 11:26:10.192923 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/ed74870d-915b-4790-9e30-02757e0c4e57-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"ed74870d-915b-4790-9e30-02757e0c4e57\") " pod="openstack/ovsdbserver-sb-0" Nov 28 11:26:10 crc kubenswrapper[4923]: I1128 11:26:10.194421 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ed74870d-915b-4790-9e30-02757e0c4e57-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"ed74870d-915b-4790-9e30-02757e0c4e57\") " pod="openstack/ovsdbserver-sb-0" Nov 28 11:26:10 crc kubenswrapper[4923]: I1128 11:26:10.194430 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ed74870d-915b-4790-9e30-02757e0c4e57-config\") pod \"ovsdbserver-sb-0\" (UID: \"ed74870d-915b-4790-9e30-02757e0c4e57\") " pod="openstack/ovsdbserver-sb-0" Nov 28 11:26:10 crc kubenswrapper[4923]: I1128 11:26:10.204311 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed74870d-915b-4790-9e30-02757e0c4e57-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"ed74870d-915b-4790-9e30-02757e0c4e57\") " pod="openstack/ovsdbserver-sb-0" Nov 28 11:26:10 crc kubenswrapper[4923]: I1128 11:26:10.208253 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5p6dk\" (UniqueName: \"kubernetes.io/projected/ed74870d-915b-4790-9e30-02757e0c4e57-kube-api-access-5p6dk\") pod \"ovsdbserver-sb-0\" (UID: \"ed74870d-915b-4790-9e30-02757e0c4e57\") " pod="openstack/ovsdbserver-sb-0" Nov 28 11:26:10 crc kubenswrapper[4923]: I1128 11:26:10.208316 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/ed74870d-915b-4790-9e30-02757e0c4e57-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"ed74870d-915b-4790-9e30-02757e0c4e57\") " pod="openstack/ovsdbserver-sb-0" Nov 28 11:26:10 crc kubenswrapper[4923]: I1128 11:26:10.221904 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/ed74870d-915b-4790-9e30-02757e0c4e57-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"ed74870d-915b-4790-9e30-02757e0c4e57\") " pod="openstack/ovsdbserver-sb-0" Nov 28 11:26:10 crc kubenswrapper[4923]: I1128 11:26:10.230452 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"ovsdbserver-sb-0\" (UID: \"ed74870d-915b-4790-9e30-02757e0c4e57\") " pod="openstack/ovsdbserver-sb-0" Nov 28 11:26:10 crc kubenswrapper[4923]: I1128 11:26:10.250049 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 28 11:26:20 crc kubenswrapper[4923]: E1128 11:26:20.830392 4923 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-memcached:current-podified" Nov 28 11:26:20 crc kubenswrapper[4923]: E1128 11:26:20.831159 4923 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:memcached,Image:quay.io/podified-antelope-centos9/openstack-memcached:current-podified,Command:[/usr/bin/dumb-init -- /usr/local/bin/kolla_start],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:memcached,HostPort:0,ContainerPort:11211,Protocol:TCP,HostIP:,},ContainerPort{Name:memcached-tls,HostPort:0,ContainerPort:11212,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:POD_IPS,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIPs,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:CONFIG_HASH,Value:nchf7h5fbh697h568h5h697h686hcbh575h5f6h685h8bhf9h568h649h65dh588hd4hfh645h8ch5b5h548hbdh549h694h84h686h5bfh56chfq,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/src,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:memcached-tls-certs,ReadOnly:true,MountPath:/var/lib/config-data/tls/certs/memcached.crt,SubPath:tls.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:memcached-tls-certs,ReadOnly:true,MountPath:/var/lib/config-data/tls/private/memcached.key,SubPath:tls.key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-gw9sq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 11211 },Host:,},GRPC:nil,},InitialDelaySeconds:3,TimeoutSeconds:5,PeriodSeconds:3,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 11211 },Host:,},GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42457,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42457,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod 
memcached-0_openstack(c444a612-9839-4189-be4c-955e0f964442): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 11:26:20 crc kubenswrapper[4923]: E1128 11:26:20.832604 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"memcached\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/memcached-0" podUID="c444a612-9839-4189-be4c-955e0f964442" Nov 28 11:26:21 crc kubenswrapper[4923]: E1128 11:26:21.028026 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"memcached\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-memcached:current-podified\\\"\"" pod="openstack/memcached-0" podUID="c444a612-9839-4189-be4c-955e0f964442" Nov 28 11:26:22 crc kubenswrapper[4923]: E1128 11:26:22.740648 4923 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-mariadb:current-podified" Nov 28 11:26:22 crc kubenswrapper[4923]: E1128 11:26:22.741296 4923 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:mysql-bootstrap,Image:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,Command:[bash /var/lib/operator-scripts/mysql_bootstrap.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:True,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:mysql-db,ReadOnly:false,MountPath:/var/lib/mysql,SubPath:mysql,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-default,ReadOnly:true,MountPath:/var/lib/config-data/default,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-generated,ReadOnly:false,MountPath:/var/lib/config-data/generated,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:operator-scripts,ReadOnly:true,MountPath:/var/lib/operator-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-tfrbf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-cell1-galera-0_openstack(1bddc188-1e43-4efd-9228-ac466ce69994): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 11:26:22 crc kubenswrapper[4923]: 
E1128 11:26:22.742588 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/openstack-cell1-galera-0" podUID="1bddc188-1e43-4efd-9228-ac466ce69994" Nov 28 11:26:23 crc kubenswrapper[4923]: E1128 11:26:23.044603 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-mariadb:current-podified\\\"\"" pod="openstack/openstack-cell1-galera-0" podUID="1bddc188-1e43-4efd-9228-ac466ce69994" Nov 28 11:26:23 crc kubenswrapper[4923]: E1128 11:26:23.718157 4923 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified" Nov 28 11:26:23 crc kubenswrapper[4923]: E1128 11:26:23.718357 4923 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:setup-container,Image:quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified,Command:[sh -c cp /tmp/erlang-cookie-secret/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie && chmod 600 /var/lib/rabbitmq/.erlang.cookie ; cp /tmp/rabbitmq-plugins/enabled_plugins /operator/enabled_plugins ; echo '[default]' > /var/lib/rabbitmq/.rabbitmqadmin.conf && sed -e 's/default_user/username/' -e 's/default_pass/password/' /tmp/default_user.conf >> /var/lib/rabbitmq/.rabbitmqadmin.conf && chmod 600 /var/lib/rabbitmq/.rabbitmqadmin.conf ; sleep 30],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:plugins-conf,ReadOnly:false,MountPath:/tmp/rabbitmq-plugins/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-erlang-cookie,ReadOnly:false,MountPath:/var/lib/rabbitmq/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:erlang-cookie-secret,ReadOnly:false,MountPath:/tmp/erlang-cookie-secret/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-plugins,ReadOnly:false,MountPath:/operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:persistence,ReadOnly:false,MountPath:/var/lib/rabbitmq/mnesia/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-confd,ReadOnly:false,MountPath:/tmp/default_user.conf,SubPath:default_user.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-mkq5z,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cell1-server-0_openstack(456d70c2-443b-455b-83fe-fc87e36534ac): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 11:26:23 crc kubenswrapper[4923]: E1128 11:26:23.719533 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/rabbitmq-cell1-server-0" podUID="456d70c2-443b-455b-83fe-fc87e36534ac" Nov 28 11:26:23 crc kubenswrapper[4923]: E1128 11:26:23.735413 4923 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified" Nov 28 11:26:23 crc kubenswrapper[4923]: E1128 11:26:23.735654 4923 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:setup-container,Image:quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified,Command:[sh -c cp /tmp/erlang-cookie-secret/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie && chmod 600 /var/lib/rabbitmq/.erlang.cookie ; cp /tmp/rabbitmq-plugins/enabled_plugins /operator/enabled_plugins ; echo '[default]' > /var/lib/rabbitmq/.rabbitmqadmin.conf && sed -e 's/default_user/username/' -e 's/default_pass/password/' /tmp/default_user.conf >> /var/lib/rabbitmq/.rabbitmqadmin.conf && chmod 600 /var/lib/rabbitmq/.rabbitmqadmin.conf ; sleep 30],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:plugins-conf,ReadOnly:false,MountPath:/tmp/rabbitmq-plugins/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-erlang-cookie,ReadOnly:false,MountPath:/var/lib/rabbitmq/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:erlang-cookie-secret,ReadOnly:false,MountPath:/tmp/erlang-cookie-secret/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-plugins,ReadOnly:false,MountPath:/operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:persistence,ReadOnly:false,MountPath:/var/lib/rabbitmq/mnesia/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-confd,ReadOnly:false,MountPath:/tmp/default_user.conf,SubPath:default_user.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-xkpzp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-server-0_openstack(26a4b167-a30a-4655-80aa-2177fe14784c): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 11:26:23 crc kubenswrapper[4923]: E1128 11:26:23.736880 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/rabbitmq-server-0" podUID="26a4b167-a30a-4655-80aa-2177fe14784c" Nov 28 11:26:23 crc kubenswrapper[4923]: E1128 11:26:23.750218 4923 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-mariadb:current-podified" Nov 28 11:26:23 crc kubenswrapper[4923]: E1128 11:26:23.750335 4923 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:mysql-bootstrap,Image:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,Command:[bash 
/var/lib/operator-scripts/mysql_bootstrap.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:True,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:mysql-db,ReadOnly:false,MountPath:/var/lib/mysql,SubPath:mysql,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-default,ReadOnly:true,MountPath:/var/lib/config-data/default,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-generated,ReadOnly:false,MountPath:/var/lib/config-data/generated,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:operator-scripts,ReadOnly:true,MountPath:/var/lib/operator-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-vpxgs,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-galera-0_openstack(b55ac65c-e6ce-46ea-83cc-83afef1efcf9): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 11:26:23 crc kubenswrapper[4923]: E1128 11:26:23.751481 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/openstack-galera-0" podUID="b55ac65c-e6ce-46ea-83cc-83afef1efcf9" Nov 28 11:26:24 crc kubenswrapper[4923]: E1128 11:26:24.051068 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-mariadb:current-podified\\\"\"" pod="openstack/openstack-galera-0" podUID="b55ac65c-e6ce-46ea-83cc-83afef1efcf9" Nov 28 11:26:24 crc kubenswrapper[4923]: E1128 11:26:24.051543 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified\\\"\"" pod="openstack/rabbitmq-cell1-server-0" podUID="456d70c2-443b-455b-83fe-fc87e36534ac" Nov 28 11:26:24 crc kubenswrapper[4923]: E1128 11:26:24.051633 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified\\\"\"" 
pod="openstack/rabbitmq-server-0" podUID="26a4b167-a30a-4655-80aa-2177fe14784c" Nov 28 11:26:28 crc kubenswrapper[4923]: I1128 11:26:28.507512 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-67x9j"] Nov 28 11:26:28 crc kubenswrapper[4923]: I1128 11:26:28.759098 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 28 11:26:28 crc kubenswrapper[4923]: E1128 11:26:28.977310 4923 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Nov 28 11:26:28 crc kubenswrapper[4923]: E1128 11:26:28.977505 4923 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndfhb5h667h568h584h5f9h58dh565h664h587h597h577h64bh5c4h66fh647hbdh68ch5c5h68dh686h5f7h64hd7hc6h55fh57bh98h57fh87h5fh57fq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-rbww4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-78dd6ddcc-7rt69_openstack(bff70415-e77c-4c9f-8955-f074232c0564): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 11:26:28 crc kubenswrapper[4923]: E1128 11:26:28.979248 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-78dd6ddcc-7rt69" podUID="bff70415-e77c-4c9f-8955-f074232c0564" Nov 28 11:26:28 crc kubenswrapper[4923]: E1128 11:26:28.988890 4923 log.go:32] "PullImage from image service failed" err="rpc 
error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Nov 28 11:26:28 crc kubenswrapper[4923]: E1128 11:26:28.989118 4923 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nfdh5dfhb6h64h676hc4h78h97h669h54chfbh696hb5h54bh5d4h6bh64h644h677h584h5cbh698h9dh5bbh5f8h5b8hcdh644h5c7h694hbfh589q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-l9fs9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-5ccc8479f9-shqft_openstack(69a9b4ec-46dc-427a-9398-658ec781a88c): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 11:26:28 crc kubenswrapper[4923]: E1128 11:26:28.990579 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-5ccc8479f9-shqft" podUID="69a9b4ec-46dc-427a-9398-658ec781a88c" Nov 28 11:26:29 crc kubenswrapper[4923]: E1128 11:26:29.001088 4923 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Nov 28 11:26:29 crc kubenswrapper[4923]: E1128 11:26:29.001298 4923 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground 
--log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n659h4h664hbh658h587h67ch89h587h8fh679hc6hf9h55fh644h5d5h698h68dh5cdh5ffh669h54ch9h689hb8hd4h5bfhd8h5d7h5fh665h574q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-n2snr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-57d769cc4f-z5rmn_openstack(d4882651-e2b0-4ab5-911f-3f1755c56d18): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 11:26:29 crc kubenswrapper[4923]: E1128 11:26:29.002465 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-57d769cc4f-z5rmn" podUID="d4882651-e2b0-4ab5-911f-3f1755c56d18" Nov 28 11:26:29 crc kubenswrapper[4923]: E1128 11:26:29.026466 4923 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Nov 28 11:26:29 crc kubenswrapper[4923]: E1128 11:26:29.026854 4923 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nffh5bdhf4h5f8h79h55h77h58fh56dh7bh6fh578hbch55dh68h56bhd9h65dh57ch658hc9h566h666h688h58h65dh684h5d7h6ch575h5d6h88q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-cmklh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-675f4bcbfc-7rsr2_openstack(06bf7bf6-2a65-40b4-9592-a942fb92d473): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 11:26:29 crc kubenswrapper[4923]: E1128 11:26:29.028041 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-675f4bcbfc-7rsr2" podUID="06bf7bf6-2a65-40b4-9592-a942fb92d473" Nov 28 11:26:29 crc kubenswrapper[4923]: I1128 11:26:29.087052 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-67x9j" event={"ID":"817453fc-6da1-4525-85bf-0d8b22848ff1","Type":"ContainerStarted","Data":"1b26d65e149d4aa7222e28429377a770af06382414e87a4b93e91c3d3ca3b820"} Nov 28 11:26:29 crc kubenswrapper[4923]: I1128 11:26:29.088656 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"87cb4282-026f-4c9b-8854-c410a2751727","Type":"ContainerStarted","Data":"f3961a852af069c5b02b01d698dac333acd54c7c5d87cff73a42811fbc8b8984"} Nov 28 11:26:29 crc kubenswrapper[4923]: E1128 11:26:29.091034 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified\\\"\"" pod="openstack/dnsmasq-dns-57d769cc4f-z5rmn" podUID="d4882651-e2b0-4ab5-911f-3f1755c56d18" Nov 28 11:26:29 crc kubenswrapper[4923]: E1128 11:26:29.096112 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified\\\"\"" pod="openstack/dnsmasq-dns-5ccc8479f9-shqft" podUID="69a9b4ec-46dc-427a-9398-658ec781a88c" Nov 28 11:26:29 crc kubenswrapper[4923]: I1128 11:26:29.463209 4923 
kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 28 11:26:29 crc kubenswrapper[4923]: I1128 11:26:29.619276 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-6d9vf"] Nov 28 11:26:29 crc kubenswrapper[4923]: W1128 11:26:29.786632 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poded74870d_915b_4790_9e30_02757e0c4e57.slice/crio-a89174c1aa3d85d084e059266ddcb5802b0216278985f25631fc12ffb1e5e61b WatchSource:0}: Error finding container a89174c1aa3d85d084e059266ddcb5802b0216278985f25631fc12ffb1e5e61b: Status 404 returned error can't find the container with id a89174c1aa3d85d084e059266ddcb5802b0216278985f25631fc12ffb1e5e61b Nov 28 11:26:29 crc kubenswrapper[4923]: E1128 11:26:29.790087 4923 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0" Nov 28 11:26:29 crc kubenswrapper[4923]: E1128 11:26:29.791186 4923 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0" Nov 28 11:26:29 crc kubenswrapper[4923]: E1128 11:26:29.791295 4923 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-state-metrics,Image:registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0,Command:[],Args:[--resources=pods --namespaces=openstack],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:http-metrics,HostPort:0,ContainerPort:8080,Protocol:TCP,HostIP:,},ContainerPort{Name:telemetry,HostPort:0,ContainerPort:8081,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-nwddd,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/livez,Port:{0 8080 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:*true,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod kube-state-metrics-0_openstack(21f79010-cee5-4d8c-8af5-cab32d6b0031): 
ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 28 11:26:29 crc kubenswrapper[4923]: E1128 11:26:29.792389 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-state-metrics\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openstack/kube-state-metrics-0" podUID="21f79010-cee5-4d8c-8af5-cab32d6b0031" Nov 28 11:26:29 crc kubenswrapper[4923]: I1128 11:26:29.845217 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-7rt69" Nov 28 11:26:29 crc kubenswrapper[4923]: I1128 11:26:29.854137 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-7rsr2" Nov 28 11:26:29 crc kubenswrapper[4923]: I1128 11:26:29.973691 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cmklh\" (UniqueName: \"kubernetes.io/projected/06bf7bf6-2a65-40b4-9592-a942fb92d473-kube-api-access-cmklh\") pod \"06bf7bf6-2a65-40b4-9592-a942fb92d473\" (UID: \"06bf7bf6-2a65-40b4-9592-a942fb92d473\") " Nov 28 11:26:29 crc kubenswrapper[4923]: I1128 11:26:29.973814 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bff70415-e77c-4c9f-8955-f074232c0564-config\") pod \"bff70415-e77c-4c9f-8955-f074232c0564\" (UID: \"bff70415-e77c-4c9f-8955-f074232c0564\") " Nov 28 11:26:29 crc kubenswrapper[4923]: I1128 11:26:29.973904 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/06bf7bf6-2a65-40b4-9592-a942fb92d473-config\") pod \"06bf7bf6-2a65-40b4-9592-a942fb92d473\" (UID: \"06bf7bf6-2a65-40b4-9592-a942fb92d473\") " Nov 28 11:26:29 crc kubenswrapper[4923]: I1128 11:26:29.974061 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bff70415-e77c-4c9f-8955-f074232c0564-dns-svc\") pod \"bff70415-e77c-4c9f-8955-f074232c0564\" (UID: \"bff70415-e77c-4c9f-8955-f074232c0564\") " Nov 28 11:26:29 crc kubenswrapper[4923]: I1128 11:26:29.974140 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rbww4\" (UniqueName: \"kubernetes.io/projected/bff70415-e77c-4c9f-8955-f074232c0564-kube-api-access-rbww4\") pod \"bff70415-e77c-4c9f-8955-f074232c0564\" (UID: \"bff70415-e77c-4c9f-8955-f074232c0564\") " Nov 28 11:26:29 crc kubenswrapper[4923]: I1128 11:26:29.976305 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bff70415-e77c-4c9f-8955-f074232c0564-config" (OuterVolumeSpecName: "config") pod "bff70415-e77c-4c9f-8955-f074232c0564" (UID: "bff70415-e77c-4c9f-8955-f074232c0564"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:26:29 crc kubenswrapper[4923]: I1128 11:26:29.980863 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bff70415-e77c-4c9f-8955-f074232c0564-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "bff70415-e77c-4c9f-8955-f074232c0564" (UID: "bff70415-e77c-4c9f-8955-f074232c0564"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:26:29 crc kubenswrapper[4923]: I1128 11:26:29.980980 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bff70415-e77c-4c9f-8955-f074232c0564-kube-api-access-rbww4" (OuterVolumeSpecName: "kube-api-access-rbww4") pod "bff70415-e77c-4c9f-8955-f074232c0564" (UID: "bff70415-e77c-4c9f-8955-f074232c0564"). InnerVolumeSpecName "kube-api-access-rbww4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:26:29 crc kubenswrapper[4923]: I1128 11:26:29.981594 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/06bf7bf6-2a65-40b4-9592-a942fb92d473-config" (OuterVolumeSpecName: "config") pod "06bf7bf6-2a65-40b4-9592-a942fb92d473" (UID: "06bf7bf6-2a65-40b4-9592-a942fb92d473"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:26:29 crc kubenswrapper[4923]: I1128 11:26:29.997187 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/06bf7bf6-2a65-40b4-9592-a942fb92d473-kube-api-access-cmklh" (OuterVolumeSpecName: "kube-api-access-cmklh") pod "06bf7bf6-2a65-40b4-9592-a942fb92d473" (UID: "06bf7bf6-2a65-40b4-9592-a942fb92d473"). InnerVolumeSpecName "kube-api-access-cmklh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:26:30 crc kubenswrapper[4923]: I1128 11:26:30.076098 4923 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bff70415-e77c-4c9f-8955-f074232c0564-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 11:26:30 crc kubenswrapper[4923]: I1128 11:26:30.076128 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rbww4\" (UniqueName: \"kubernetes.io/projected/bff70415-e77c-4c9f-8955-f074232c0564-kube-api-access-rbww4\") on node \"crc\" DevicePath \"\"" Nov 28 11:26:30 crc kubenswrapper[4923]: I1128 11:26:30.076140 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cmklh\" (UniqueName: \"kubernetes.io/projected/06bf7bf6-2a65-40b4-9592-a942fb92d473-kube-api-access-cmklh\") on node \"crc\" DevicePath \"\"" Nov 28 11:26:30 crc kubenswrapper[4923]: I1128 11:26:30.076152 4923 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bff70415-e77c-4c9f-8955-f074232c0564-config\") on node \"crc\" DevicePath \"\"" Nov 28 11:26:30 crc kubenswrapper[4923]: I1128 11:26:30.076161 4923 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/06bf7bf6-2a65-40b4-9592-a942fb92d473-config\") on node \"crc\" DevicePath \"\"" Nov 28 11:26:30 crc kubenswrapper[4923]: I1128 11:26:30.094989 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-6d9vf" event={"ID":"92ee0076-8a34-461d-8af0-1e0739e91266","Type":"ContainerStarted","Data":"163067ffaa75027acb774bf8bf0052b089a4b7f9b0323b8a1e682ce26bcd06ba"} Nov 28 11:26:30 crc kubenswrapper[4923]: I1128 11:26:30.096051 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"ed74870d-915b-4790-9e30-02757e0c4e57","Type":"ContainerStarted","Data":"a89174c1aa3d85d084e059266ddcb5802b0216278985f25631fc12ffb1e5e61b"} Nov 28 11:26:30 crc kubenswrapper[4923]: I1128 11:26:30.096912 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-7rsr2" 
event={"ID":"06bf7bf6-2a65-40b4-9592-a942fb92d473","Type":"ContainerDied","Data":"1fd9827d51ba21978630b4fbf0e576506296c926fcaec2dd95d7ec9b43c6ecc4"} Nov 28 11:26:30 crc kubenswrapper[4923]: I1128 11:26:30.096986 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-7rsr2" Nov 28 11:26:30 crc kubenswrapper[4923]: I1128 11:26:30.108016 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-7rt69" event={"ID":"bff70415-e77c-4c9f-8955-f074232c0564","Type":"ContainerDied","Data":"8d662a06e839abc4751e184c7509640437eb9467275c10a545a62d14bfd4a574"} Nov 28 11:26:30 crc kubenswrapper[4923]: I1128 11:26:30.108148 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-7rt69" Nov 28 11:26:30 crc kubenswrapper[4923]: E1128 11:26:30.110370 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-state-metrics\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0\\\"\"" pod="openstack/kube-state-metrics-0" podUID="21f79010-cee5-4d8c-8af5-cab32d6b0031" Nov 28 11:26:30 crc kubenswrapper[4923]: I1128 11:26:30.179223 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-7rsr2"] Nov 28 11:26:30 crc kubenswrapper[4923]: I1128 11:26:30.184086 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-7rsr2"] Nov 28 11:26:30 crc kubenswrapper[4923]: I1128 11:26:30.194738 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-7rt69"] Nov 28 11:26:30 crc kubenswrapper[4923]: I1128 11:26:30.199294 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-7rt69"] Nov 28 11:26:31 crc kubenswrapper[4923]: I1128 11:26:31.179109 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="06bf7bf6-2a65-40b4-9592-a942fb92d473" path="/var/lib/kubelet/pods/06bf7bf6-2a65-40b4-9592-a942fb92d473/volumes" Nov 28 11:26:31 crc kubenswrapper[4923]: I1128 11:26:31.179961 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bff70415-e77c-4c9f-8955-f074232c0564" path="/var/lib/kubelet/pods/bff70415-e77c-4c9f-8955-f074232c0564/volumes" Nov 28 11:26:40 crc kubenswrapper[4923]: I1128 11:26:40.198638 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-67x9j" event={"ID":"817453fc-6da1-4525-85bf-0d8b22848ff1","Type":"ContainerStarted","Data":"b45aaa6a88dc9a2d44fa04f8b8bcdb56e4cf94ec8000fc590ce50d582ba2d11e"} Nov 28 11:26:40 crc kubenswrapper[4923]: I1128 11:26:40.200075 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-67x9j" Nov 28 11:26:40 crc kubenswrapper[4923]: I1128 11:26:40.201956 4923 generic.go:334] "Generic (PLEG): container finished" podID="92ee0076-8a34-461d-8af0-1e0739e91266" containerID="a88591f3bb065774e37a6ed502456aa49e230e95d84c1a0f8f600e47d3c70332" exitCode=0 Nov 28 11:26:40 crc kubenswrapper[4923]: I1128 11:26:40.201993 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-6d9vf" event={"ID":"92ee0076-8a34-461d-8af0-1e0739e91266","Type":"ContainerDied","Data":"a88591f3bb065774e37a6ed502456aa49e230e95d84c1a0f8f600e47d3c70332"} Nov 28 11:26:40 crc kubenswrapper[4923]: I1128 11:26:40.203635 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/memcached-0" event={"ID":"c444a612-9839-4189-be4c-955e0f964442","Type":"ContainerStarted","Data":"9ba54c24cbb7ecce84483c142b1068344723dfa90ea90e2bed3c71363112d182"} Nov 28 11:26:40 crc kubenswrapper[4923]: I1128 11:26:40.203818 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0" Nov 28 11:26:40 crc kubenswrapper[4923]: I1128 11:26:40.209567 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"ed74870d-915b-4790-9e30-02757e0c4e57","Type":"ContainerStarted","Data":"0cd9587ee13e652b32245f7bf59e2c5f86274a1c03b57d2f8795d93b1ae16d37"} Nov 28 11:26:40 crc kubenswrapper[4923]: I1128 11:26:40.211176 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"87cb4282-026f-4c9b-8854-c410a2751727","Type":"ContainerStarted","Data":"ff7f697b8871d97586483ac9f6d2943d54a31039c484e928a0351a5bc6431151"} Nov 28 11:26:40 crc kubenswrapper[4923]: I1128 11:26:40.212608 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"1bddc188-1e43-4efd-9228-ac466ce69994","Type":"ContainerStarted","Data":"4e97774b3af26097f59dcc7beeef76915e7ef0da24fb7873fd07f048badf7cf3"} Nov 28 11:26:40 crc kubenswrapper[4923]: I1128 11:26:40.218585 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"b55ac65c-e6ce-46ea-83cc-83afef1efcf9","Type":"ContainerStarted","Data":"197e5229f33c865f1138159bccedf1b157132bb4bc1e3bd34815de127fd7aed8"} Nov 28 11:26:40 crc kubenswrapper[4923]: I1128 11:26:40.241579 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-67x9j" podStartSLOduration=24.759205525 podStartE2EDuration="35.241565291s" podCreationTimestamp="2025-11-28 11:26:05 +0000 UTC" firstStartedPulling="2025-11-28 11:26:29.011071201 +0000 UTC m=+1068.139755411" lastFinishedPulling="2025-11-28 11:26:39.493430937 +0000 UTC m=+1078.622115177" observedRunningTime="2025-11-28 11:26:40.217263637 +0000 UTC m=+1079.345947847" watchObservedRunningTime="2025-11-28 11:26:40.241565291 +0000 UTC m=+1079.370249501" Nov 28 11:26:40 crc kubenswrapper[4923]: I1128 11:26:40.282368 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=2.529655797 podStartE2EDuration="40.282353429s" podCreationTimestamp="2025-11-28 11:26:00 +0000 UTC" firstStartedPulling="2025-11-28 11:26:01.73948995 +0000 UTC m=+1040.868174150" lastFinishedPulling="2025-11-28 11:26:39.492187532 +0000 UTC m=+1078.620871782" observedRunningTime="2025-11-28 11:26:40.274899189 +0000 UTC m=+1079.403583399" watchObservedRunningTime="2025-11-28 11:26:40.282353429 +0000 UTC m=+1079.411037629" Nov 28 11:26:41 crc kubenswrapper[4923]: I1128 11:26:41.227595 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"26a4b167-a30a-4655-80aa-2177fe14784c","Type":"ContainerStarted","Data":"877c6e8210bfbbb050a57173fa72769c5cca178fe72691fd5da642acdfd3f260"} Nov 28 11:26:41 crc kubenswrapper[4923]: I1128 11:26:41.230482 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-6d9vf" event={"ID":"92ee0076-8a34-461d-8af0-1e0739e91266","Type":"ContainerStarted","Data":"e394a1406639388b53ff670cfe4032746e52d06aaaa39253e187f50df44bf905"} Nov 28 11:26:41 crc kubenswrapper[4923]: I1128 11:26:41.230501 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/ovn-controller-ovs-6d9vf" event={"ID":"92ee0076-8a34-461d-8af0-1e0739e91266","Type":"ContainerStarted","Data":"c90a9d9c7a044ad15de01dac97cf5102f6091d8246b03c76dfb747db9c4b7a2f"} Nov 28 11:26:41 crc kubenswrapper[4923]: I1128 11:26:41.230918 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-6d9vf" Nov 28 11:26:41 crc kubenswrapper[4923]: I1128 11:26:41.230971 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-6d9vf" Nov 28 11:26:41 crc kubenswrapper[4923]: I1128 11:26:41.232313 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"456d70c2-443b-455b-83fe-fc87e36534ac","Type":"ContainerStarted","Data":"6b9977387f4a04660289708811d6e9fd63ab44d05d56ad4d5f94de24f39428d6"} Nov 28 11:26:41 crc kubenswrapper[4923]: I1128 11:26:41.294683 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-6d9vf" podStartSLOduration=26.645904291 podStartE2EDuration="36.294667038s" podCreationTimestamp="2025-11-28 11:26:05 +0000 UTC" firstStartedPulling="2025-11-28 11:26:29.799868149 +0000 UTC m=+1068.928552359" lastFinishedPulling="2025-11-28 11:26:39.448630866 +0000 UTC m=+1078.577315106" observedRunningTime="2025-11-28 11:26:41.273166743 +0000 UTC m=+1080.401850953" watchObservedRunningTime="2025-11-28 11:26:41.294667038 +0000 UTC m=+1080.423351248" Nov 28 11:26:43 crc kubenswrapper[4923]: I1128 11:26:43.249146 4923 generic.go:334] "Generic (PLEG): container finished" podID="1bddc188-1e43-4efd-9228-ac466ce69994" containerID="4e97774b3af26097f59dcc7beeef76915e7ef0da24fb7873fd07f048badf7cf3" exitCode=0 Nov 28 11:26:43 crc kubenswrapper[4923]: I1128 11:26:43.249494 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"1bddc188-1e43-4efd-9228-ac466ce69994","Type":"ContainerDied","Data":"4e97774b3af26097f59dcc7beeef76915e7ef0da24fb7873fd07f048badf7cf3"} Nov 28 11:26:44 crc kubenswrapper[4923]: I1128 11:26:44.262321 4923 generic.go:334] "Generic (PLEG): container finished" podID="d4882651-e2b0-4ab5-911f-3f1755c56d18" containerID="05df6b674f932ea03cf7a87795478a6a29700e564006f5a275fc379534bc7239" exitCode=0 Nov 28 11:26:44 crc kubenswrapper[4923]: I1128 11:26:44.262456 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-z5rmn" event={"ID":"d4882651-e2b0-4ab5-911f-3f1755c56d18","Type":"ContainerDied","Data":"05df6b674f932ea03cf7a87795478a6a29700e564006f5a275fc379534bc7239"} Nov 28 11:26:44 crc kubenswrapper[4923]: I1128 11:26:44.265484 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"21f79010-cee5-4d8c-8af5-cab32d6b0031","Type":"ContainerStarted","Data":"1736bac7b866d033abb565e298ecff59892496d4ccf42e13ef69d0f1c0229351"} Nov 28 11:26:44 crc kubenswrapper[4923]: I1128 11:26:44.266346 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Nov 28 11:26:44 crc kubenswrapper[4923]: I1128 11:26:44.267575 4923 generic.go:334] "Generic (PLEG): container finished" podID="b55ac65c-e6ce-46ea-83cc-83afef1efcf9" containerID="197e5229f33c865f1138159bccedf1b157132bb4bc1e3bd34815de127fd7aed8" exitCode=0 Nov 28 11:26:44 crc kubenswrapper[4923]: I1128 11:26:44.267658 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" 
event={"ID":"b55ac65c-e6ce-46ea-83cc-83afef1efcf9","Type":"ContainerDied","Data":"197e5229f33c865f1138159bccedf1b157132bb4bc1e3bd34815de127fd7aed8"} Nov 28 11:26:44 crc kubenswrapper[4923]: I1128 11:26:44.274082 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"ed74870d-915b-4790-9e30-02757e0c4e57","Type":"ContainerStarted","Data":"c41b454705bcbc5e4e18f5b9f484d4d6779f6ede6f3d88d7b01ca479e69cd0a2"} Nov 28 11:26:44 crc kubenswrapper[4923]: I1128 11:26:44.275691 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"87cb4282-026f-4c9b-8854-c410a2751727","Type":"ContainerStarted","Data":"c997d7c91f7c284a3f728efd7d5f3e9f18476a7e8729bf7b35a6e01523c67144"} Nov 28 11:26:44 crc kubenswrapper[4923]: I1128 11:26:44.280216 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"1bddc188-1e43-4efd-9228-ac466ce69994","Type":"ContainerStarted","Data":"07c87d4b4cf668fbc31acdce84f82c0512ad98bc1ec704efab96af5377be65fe"} Nov 28 11:26:44 crc kubenswrapper[4923]: I1128 11:26:44.281127 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5ccc8479f9-shqft" event={"ID":"69a9b4ec-46dc-427a-9398-658ec781a88c","Type":"ContainerDied","Data":"d27c2a2ea3a5a51f59451051f71cf1410748e117ce728726e6cabe31c76e03f9"} Nov 28 11:26:44 crc kubenswrapper[4923]: I1128 11:26:44.281039 4923 generic.go:334] "Generic (PLEG): container finished" podID="69a9b4ec-46dc-427a-9398-658ec781a88c" containerID="d27c2a2ea3a5a51f59451051f71cf1410748e117ce728726e6cabe31c76e03f9" exitCode=0 Nov 28 11:26:44 crc kubenswrapper[4923]: I1128 11:26:44.374576 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=7.449266095 podStartE2EDuration="45.374556963s" podCreationTimestamp="2025-11-28 11:25:59 +0000 UTC" firstStartedPulling="2025-11-28 11:26:01.568095587 +0000 UTC m=+1040.696779797" lastFinishedPulling="2025-11-28 11:26:39.493386415 +0000 UTC m=+1078.622070665" observedRunningTime="2025-11-28 11:26:44.363646856 +0000 UTC m=+1083.492331096" watchObservedRunningTime="2025-11-28 11:26:44.374556963 +0000 UTC m=+1083.503241173" Nov 28 11:26:44 crc kubenswrapper[4923]: I1128 11:26:44.380506 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=3.024483597 podStartE2EDuration="42.38049266s" podCreationTimestamp="2025-11-28 11:26:02 +0000 UTC" firstStartedPulling="2025-11-28 11:26:03.585979474 +0000 UTC m=+1042.714663684" lastFinishedPulling="2025-11-28 11:26:42.941988527 +0000 UTC m=+1082.070672747" observedRunningTime="2025-11-28 11:26:44.380324975 +0000 UTC m=+1083.509009175" watchObservedRunningTime="2025-11-28 11:26:44.38049266 +0000 UTC m=+1083.509176870" Nov 28 11:26:44 crc kubenswrapper[4923]: I1128 11:26:44.432505 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=23.494332443 podStartE2EDuration="37.432486583s" podCreationTimestamp="2025-11-28 11:26:07 +0000 UTC" firstStartedPulling="2025-11-28 11:26:29.012088899 +0000 UTC m=+1068.140773109" lastFinishedPulling="2025-11-28 11:26:42.950243039 +0000 UTC m=+1082.078927249" observedRunningTime="2025-11-28 11:26:44.42065653 +0000 UTC m=+1083.549340730" watchObservedRunningTime="2025-11-28 11:26:44.432486583 +0000 UTC m=+1083.561170793" Nov 28 11:26:44 crc kubenswrapper[4923]: I1128 11:26:44.444369 
4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=23.25568254 podStartE2EDuration="36.444353397s" podCreationTimestamp="2025-11-28 11:26:08 +0000 UTC" firstStartedPulling="2025-11-28 11:26:29.795083925 +0000 UTC m=+1068.923768135" lastFinishedPulling="2025-11-28 11:26:42.983754782 +0000 UTC m=+1082.112438992" observedRunningTime="2025-11-28 11:26:44.436120405 +0000 UTC m=+1083.564804615" watchObservedRunningTime="2025-11-28 11:26:44.444353397 +0000 UTC m=+1083.573037607" Nov 28 11:26:45 crc kubenswrapper[4923]: I1128 11:26:45.000992 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0" Nov 28 11:26:45 crc kubenswrapper[4923]: I1128 11:26:45.066467 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0" Nov 28 11:26:45 crc kubenswrapper[4923]: I1128 11:26:45.250522 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0" Nov 28 11:26:45 crc kubenswrapper[4923]: I1128 11:26:45.298874 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5ccc8479f9-shqft" event={"ID":"69a9b4ec-46dc-427a-9398-658ec781a88c","Type":"ContainerStarted","Data":"006d02cb2fa96661d78770be71569839915c2300da1336064c23e99243cd9960"} Nov 28 11:26:45 crc kubenswrapper[4923]: I1128 11:26:45.299298 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5ccc8479f9-shqft" Nov 28 11:26:45 crc kubenswrapper[4923]: I1128 11:26:45.302632 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-z5rmn" event={"ID":"d4882651-e2b0-4ab5-911f-3f1755c56d18","Type":"ContainerStarted","Data":"c292ca20290ab83c6b4e7fc01e9085ba9f72393e37cd515046a85867e4b822ef"} Nov 28 11:26:45 crc kubenswrapper[4923]: I1128 11:26:45.303034 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-57d769cc4f-z5rmn" Nov 28 11:26:45 crc kubenswrapper[4923]: I1128 11:26:45.309106 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"b55ac65c-e6ce-46ea-83cc-83afef1efcf9","Type":"ContainerStarted","Data":"0f423ec6cafb3db52684a039e9f0297566c5a99eaee9184d6377c795eb4edeb0"} Nov 28 11:26:45 crc kubenswrapper[4923]: I1128 11:26:45.309338 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0" Nov 28 11:26:45 crc kubenswrapper[4923]: I1128 11:26:45.344583 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5ccc8479f9-shqft" podStartSLOduration=4.23646397 podStartE2EDuration="50.344557351s" podCreationTimestamp="2025-11-28 11:25:55 +0000 UTC" firstStartedPulling="2025-11-28 11:25:56.841639994 +0000 UTC m=+1035.970324204" lastFinishedPulling="2025-11-28 11:26:42.949733365 +0000 UTC m=+1082.078417585" observedRunningTime="2025-11-28 11:26:45.32747089 +0000 UTC m=+1084.456155140" watchObservedRunningTime="2025-11-28 11:26:45.344557351 +0000 UTC m=+1084.473241601" Nov 28 11:26:45 crc kubenswrapper[4923]: I1128 11:26:45.371431 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=7.852402286 podStartE2EDuration="47.371406626s" podCreationTimestamp="2025-11-28 11:25:58 +0000 UTC" firstStartedPulling="2025-11-28 11:26:00.336057264 +0000 UTC m=+1039.464741474" lastFinishedPulling="2025-11-28 
11:26:39.855061604 +0000 UTC m=+1078.983745814" observedRunningTime="2025-11-28 11:26:45.360073028 +0000 UTC m=+1084.488757258" watchObservedRunningTime="2025-11-28 11:26:45.371406626 +0000 UTC m=+1084.500090876" Nov 28 11:26:45 crc kubenswrapper[4923]: I1128 11:26:45.380286 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-57d769cc4f-z5rmn" podStartSLOduration=3.8083625359999997 podStartE2EDuration="49.380263716s" podCreationTimestamp="2025-11-28 11:25:56 +0000 UTC" firstStartedPulling="2025-11-28 11:25:57.376573259 +0000 UTC m=+1036.505257469" lastFinishedPulling="2025-11-28 11:26:42.948474429 +0000 UTC m=+1082.077158649" observedRunningTime="2025-11-28 11:26:45.380149042 +0000 UTC m=+1084.508833262" watchObservedRunningTime="2025-11-28 11:26:45.380263716 +0000 UTC m=+1084.508947936" Nov 28 11:26:45 crc kubenswrapper[4923]: I1128 11:26:45.383303 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0" Nov 28 11:26:45 crc kubenswrapper[4923]: I1128 11:26:45.661866 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5ccc8479f9-shqft"] Nov 28 11:26:45 crc kubenswrapper[4923]: I1128 11:26:45.692700 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7fd796d7df-rlnpt"] Nov 28 11:26:45 crc kubenswrapper[4923]: I1128 11:26:45.693859 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7fd796d7df-rlnpt" Nov 28 11:26:45 crc kubenswrapper[4923]: I1128 11:26:45.695899 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb" Nov 28 11:26:45 crc kubenswrapper[4923]: I1128 11:26:45.774229 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7fd796d7df-rlnpt"] Nov 28 11:26:45 crc kubenswrapper[4923]: I1128 11:26:45.792170 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-8vd6b"] Nov 28 11:26:45 crc kubenswrapper[4923]: I1128 11:26:45.796330 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-metrics-8vd6b" Nov 28 11:26:45 crc kubenswrapper[4923]: I1128 11:26:45.802855 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config" Nov 28 11:26:45 crc kubenswrapper[4923]: I1128 11:26:45.815360 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2909a8f5-2cbf-42e7-b43c-699344dca6bc-ovsdbserver-nb\") pod \"dnsmasq-dns-7fd796d7df-rlnpt\" (UID: \"2909a8f5-2cbf-42e7-b43c-699344dca6bc\") " pod="openstack/dnsmasq-dns-7fd796d7df-rlnpt" Nov 28 11:26:45 crc kubenswrapper[4923]: I1128 11:26:45.815401 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xrgft\" (UniqueName: \"kubernetes.io/projected/2909a8f5-2cbf-42e7-b43c-699344dca6bc-kube-api-access-xrgft\") pod \"dnsmasq-dns-7fd796d7df-rlnpt\" (UID: \"2909a8f5-2cbf-42e7-b43c-699344dca6bc\") " pod="openstack/dnsmasq-dns-7fd796d7df-rlnpt" Nov 28 11:26:45 crc kubenswrapper[4923]: I1128 11:26:45.815450 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2909a8f5-2cbf-42e7-b43c-699344dca6bc-config\") pod \"dnsmasq-dns-7fd796d7df-rlnpt\" (UID: \"2909a8f5-2cbf-42e7-b43c-699344dca6bc\") " pod="openstack/dnsmasq-dns-7fd796d7df-rlnpt" Nov 28 11:26:45 crc kubenswrapper[4923]: I1128 11:26:45.815479 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2909a8f5-2cbf-42e7-b43c-699344dca6bc-dns-svc\") pod \"dnsmasq-dns-7fd796d7df-rlnpt\" (UID: \"2909a8f5-2cbf-42e7-b43c-699344dca6bc\") " pod="openstack/dnsmasq-dns-7fd796d7df-rlnpt" Nov 28 11:26:45 crc kubenswrapper[4923]: I1128 11:26:45.854715 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-8vd6b"] Nov 28 11:26:45 crc kubenswrapper[4923]: I1128 11:26:45.917497 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xrgft\" (UniqueName: \"kubernetes.io/projected/2909a8f5-2cbf-42e7-b43c-699344dca6bc-kube-api-access-xrgft\") pod \"dnsmasq-dns-7fd796d7df-rlnpt\" (UID: \"2909a8f5-2cbf-42e7-b43c-699344dca6bc\") " pod="openstack/dnsmasq-dns-7fd796d7df-rlnpt" Nov 28 11:26:45 crc kubenswrapper[4923]: I1128 11:26:45.917556 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/5bb6b6a6-27ae-4724-bd56-4a8f3891595d-ovn-rundir\") pod \"ovn-controller-metrics-8vd6b\" (UID: \"5bb6b6a6-27ae-4724-bd56-4a8f3891595d\") " pod="openstack/ovn-controller-metrics-8vd6b" Nov 28 11:26:45 crc kubenswrapper[4923]: I1128 11:26:45.917592 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2909a8f5-2cbf-42e7-b43c-699344dca6bc-config\") pod \"dnsmasq-dns-7fd796d7df-rlnpt\" (UID: \"2909a8f5-2cbf-42e7-b43c-699344dca6bc\") " pod="openstack/dnsmasq-dns-7fd796d7df-rlnpt" Nov 28 11:26:45 crc kubenswrapper[4923]: I1128 11:26:45.917630 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2909a8f5-2cbf-42e7-b43c-699344dca6bc-dns-svc\") pod \"dnsmasq-dns-7fd796d7df-rlnpt\" (UID: \"2909a8f5-2cbf-42e7-b43c-699344dca6bc\") " 
pod="openstack/dnsmasq-dns-7fd796d7df-rlnpt" Nov 28 11:26:45 crc kubenswrapper[4923]: I1128 11:26:45.917662 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-28tgb\" (UniqueName: \"kubernetes.io/projected/5bb6b6a6-27ae-4724-bd56-4a8f3891595d-kube-api-access-28tgb\") pod \"ovn-controller-metrics-8vd6b\" (UID: \"5bb6b6a6-27ae-4724-bd56-4a8f3891595d\") " pod="openstack/ovn-controller-metrics-8vd6b" Nov 28 11:26:45 crc kubenswrapper[4923]: I1128 11:26:45.917684 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/5bb6b6a6-27ae-4724-bd56-4a8f3891595d-ovs-rundir\") pod \"ovn-controller-metrics-8vd6b\" (UID: \"5bb6b6a6-27ae-4724-bd56-4a8f3891595d\") " pod="openstack/ovn-controller-metrics-8vd6b" Nov 28 11:26:45 crc kubenswrapper[4923]: I1128 11:26:45.917703 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5bb6b6a6-27ae-4724-bd56-4a8f3891595d-config\") pod \"ovn-controller-metrics-8vd6b\" (UID: \"5bb6b6a6-27ae-4724-bd56-4a8f3891595d\") " pod="openstack/ovn-controller-metrics-8vd6b" Nov 28 11:26:45 crc kubenswrapper[4923]: I1128 11:26:45.917721 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/5bb6b6a6-27ae-4724-bd56-4a8f3891595d-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-8vd6b\" (UID: \"5bb6b6a6-27ae-4724-bd56-4a8f3891595d\") " pod="openstack/ovn-controller-metrics-8vd6b" Nov 28 11:26:45 crc kubenswrapper[4923]: I1128 11:26:45.917736 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5bb6b6a6-27ae-4724-bd56-4a8f3891595d-combined-ca-bundle\") pod \"ovn-controller-metrics-8vd6b\" (UID: \"5bb6b6a6-27ae-4724-bd56-4a8f3891595d\") " pod="openstack/ovn-controller-metrics-8vd6b" Nov 28 11:26:45 crc kubenswrapper[4923]: I1128 11:26:45.917792 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2909a8f5-2cbf-42e7-b43c-699344dca6bc-ovsdbserver-nb\") pod \"dnsmasq-dns-7fd796d7df-rlnpt\" (UID: \"2909a8f5-2cbf-42e7-b43c-699344dca6bc\") " pod="openstack/dnsmasq-dns-7fd796d7df-rlnpt" Nov 28 11:26:45 crc kubenswrapper[4923]: I1128 11:26:45.918609 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2909a8f5-2cbf-42e7-b43c-699344dca6bc-ovsdbserver-nb\") pod \"dnsmasq-dns-7fd796d7df-rlnpt\" (UID: \"2909a8f5-2cbf-42e7-b43c-699344dca6bc\") " pod="openstack/dnsmasq-dns-7fd796d7df-rlnpt" Nov 28 11:26:45 crc kubenswrapper[4923]: I1128 11:26:45.919363 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2909a8f5-2cbf-42e7-b43c-699344dca6bc-config\") pod \"dnsmasq-dns-7fd796d7df-rlnpt\" (UID: \"2909a8f5-2cbf-42e7-b43c-699344dca6bc\") " pod="openstack/dnsmasq-dns-7fd796d7df-rlnpt" Nov 28 11:26:45 crc kubenswrapper[4923]: I1128 11:26:45.919700 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2909a8f5-2cbf-42e7-b43c-699344dca6bc-dns-svc\") pod \"dnsmasq-dns-7fd796d7df-rlnpt\" (UID: \"2909a8f5-2cbf-42e7-b43c-699344dca6bc\") " 
pod="openstack/dnsmasq-dns-7fd796d7df-rlnpt" Nov 28 11:26:45 crc kubenswrapper[4923]: I1128 11:26:45.942513 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-z5rmn"] Nov 28 11:26:45 crc kubenswrapper[4923]: I1128 11:26:45.963726 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xrgft\" (UniqueName: \"kubernetes.io/projected/2909a8f5-2cbf-42e7-b43c-699344dca6bc-kube-api-access-xrgft\") pod \"dnsmasq-dns-7fd796d7df-rlnpt\" (UID: \"2909a8f5-2cbf-42e7-b43c-699344dca6bc\") " pod="openstack/dnsmasq-dns-7fd796d7df-rlnpt" Nov 28 11:26:45 crc kubenswrapper[4923]: I1128 11:26:45.967702 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-ms5mm"] Nov 28 11:26:45 crc kubenswrapper[4923]: I1128 11:26:45.968861 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-ms5mm" Nov 28 11:26:45 crc kubenswrapper[4923]: I1128 11:26:45.971064 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb" Nov 28 11:26:45 crc kubenswrapper[4923]: I1128 11:26:45.989919 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-ms5mm"] Nov 28 11:26:46 crc kubenswrapper[4923]: I1128 11:26:46.012266 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7fd796d7df-rlnpt" Nov 28 11:26:46 crc kubenswrapper[4923]: I1128 11:26:46.019062 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0" Nov 28 11:26:46 crc kubenswrapper[4923]: I1128 11:26:46.019968 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/5bb6b6a6-27ae-4724-bd56-4a8f3891595d-ovn-rundir\") pod \"ovn-controller-metrics-8vd6b\" (UID: \"5bb6b6a6-27ae-4724-bd56-4a8f3891595d\") " pod="openstack/ovn-controller-metrics-8vd6b" Nov 28 11:26:46 crc kubenswrapper[4923]: I1128 11:26:46.020055 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-28tgb\" (UniqueName: \"kubernetes.io/projected/5bb6b6a6-27ae-4724-bd56-4a8f3891595d-kube-api-access-28tgb\") pod \"ovn-controller-metrics-8vd6b\" (UID: \"5bb6b6a6-27ae-4724-bd56-4a8f3891595d\") " pod="openstack/ovn-controller-metrics-8vd6b" Nov 28 11:26:46 crc kubenswrapper[4923]: I1128 11:26:46.020081 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/5bb6b6a6-27ae-4724-bd56-4a8f3891595d-ovs-rundir\") pod \"ovn-controller-metrics-8vd6b\" (UID: \"5bb6b6a6-27ae-4724-bd56-4a8f3891595d\") " pod="openstack/ovn-controller-metrics-8vd6b" Nov 28 11:26:46 crc kubenswrapper[4923]: I1128 11:26:46.020106 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5bb6b6a6-27ae-4724-bd56-4a8f3891595d-config\") pod \"ovn-controller-metrics-8vd6b\" (UID: \"5bb6b6a6-27ae-4724-bd56-4a8f3891595d\") " pod="openstack/ovn-controller-metrics-8vd6b" Nov 28 11:26:46 crc kubenswrapper[4923]: I1128 11:26:46.020129 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/5bb6b6a6-27ae-4724-bd56-4a8f3891595d-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-8vd6b\" (UID: \"5bb6b6a6-27ae-4724-bd56-4a8f3891595d\") " 
pod="openstack/ovn-controller-metrics-8vd6b" Nov 28 11:26:46 crc kubenswrapper[4923]: I1128 11:26:46.020152 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5bb6b6a6-27ae-4724-bd56-4a8f3891595d-combined-ca-bundle\") pod \"ovn-controller-metrics-8vd6b\" (UID: \"5bb6b6a6-27ae-4724-bd56-4a8f3891595d\") " pod="openstack/ovn-controller-metrics-8vd6b" Nov 28 11:26:46 crc kubenswrapper[4923]: I1128 11:26:46.020597 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/5bb6b6a6-27ae-4724-bd56-4a8f3891595d-ovn-rundir\") pod \"ovn-controller-metrics-8vd6b\" (UID: \"5bb6b6a6-27ae-4724-bd56-4a8f3891595d\") " pod="openstack/ovn-controller-metrics-8vd6b" Nov 28 11:26:46 crc kubenswrapper[4923]: I1128 11:26:46.020620 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/5bb6b6a6-27ae-4724-bd56-4a8f3891595d-ovs-rundir\") pod \"ovn-controller-metrics-8vd6b\" (UID: \"5bb6b6a6-27ae-4724-bd56-4a8f3891595d\") " pod="openstack/ovn-controller-metrics-8vd6b" Nov 28 11:26:46 crc kubenswrapper[4923]: I1128 11:26:46.021186 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5bb6b6a6-27ae-4724-bd56-4a8f3891595d-config\") pod \"ovn-controller-metrics-8vd6b\" (UID: \"5bb6b6a6-27ae-4724-bd56-4a8f3891595d\") " pod="openstack/ovn-controller-metrics-8vd6b" Nov 28 11:26:46 crc kubenswrapper[4923]: I1128 11:26:46.025320 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/5bb6b6a6-27ae-4724-bd56-4a8f3891595d-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-8vd6b\" (UID: \"5bb6b6a6-27ae-4724-bd56-4a8f3891595d\") " pod="openstack/ovn-controller-metrics-8vd6b" Nov 28 11:26:46 crc kubenswrapper[4923]: I1128 11:26:46.039175 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5bb6b6a6-27ae-4724-bd56-4a8f3891595d-combined-ca-bundle\") pod \"ovn-controller-metrics-8vd6b\" (UID: \"5bb6b6a6-27ae-4724-bd56-4a8f3891595d\") " pod="openstack/ovn-controller-metrics-8vd6b" Nov 28 11:26:46 crc kubenswrapper[4923]: I1128 11:26:46.062205 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-28tgb\" (UniqueName: \"kubernetes.io/projected/5bb6b6a6-27ae-4724-bd56-4a8f3891595d-kube-api-access-28tgb\") pod \"ovn-controller-metrics-8vd6b\" (UID: \"5bb6b6a6-27ae-4724-bd56-4a8f3891595d\") " pod="openstack/ovn-controller-metrics-8vd6b" Nov 28 11:26:46 crc kubenswrapper[4923]: I1128 11:26:46.121205 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jv7pt\" (UniqueName: \"kubernetes.io/projected/4b2dbc83-52f0-425a-955f-795d78314254-kube-api-access-jv7pt\") pod \"dnsmasq-dns-86db49b7ff-ms5mm\" (UID: \"4b2dbc83-52f0-425a-955f-795d78314254\") " pod="openstack/dnsmasq-dns-86db49b7ff-ms5mm" Nov 28 11:26:46 crc kubenswrapper[4923]: I1128 11:26:46.121629 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4b2dbc83-52f0-425a-955f-795d78314254-ovsdbserver-sb\") pod \"dnsmasq-dns-86db49b7ff-ms5mm\" (UID: \"4b2dbc83-52f0-425a-955f-795d78314254\") " 
pod="openstack/dnsmasq-dns-86db49b7ff-ms5mm" Nov 28 11:26:46 crc kubenswrapper[4923]: I1128 11:26:46.121754 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4b2dbc83-52f0-425a-955f-795d78314254-config\") pod \"dnsmasq-dns-86db49b7ff-ms5mm\" (UID: \"4b2dbc83-52f0-425a-955f-795d78314254\") " pod="openstack/dnsmasq-dns-86db49b7ff-ms5mm" Nov 28 11:26:46 crc kubenswrapper[4923]: I1128 11:26:46.121861 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4b2dbc83-52f0-425a-955f-795d78314254-ovsdbserver-nb\") pod \"dnsmasq-dns-86db49b7ff-ms5mm\" (UID: \"4b2dbc83-52f0-425a-955f-795d78314254\") " pod="openstack/dnsmasq-dns-86db49b7ff-ms5mm" Nov 28 11:26:46 crc kubenswrapper[4923]: I1128 11:26:46.121997 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4b2dbc83-52f0-425a-955f-795d78314254-dns-svc\") pod \"dnsmasq-dns-86db49b7ff-ms5mm\" (UID: \"4b2dbc83-52f0-425a-955f-795d78314254\") " pod="openstack/dnsmasq-dns-86db49b7ff-ms5mm" Nov 28 11:26:46 crc kubenswrapper[4923]: I1128 11:26:46.123856 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-8vd6b" Nov 28 11:26:46 crc kubenswrapper[4923]: I1128 11:26:46.226898 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4b2dbc83-52f0-425a-955f-795d78314254-ovsdbserver-nb\") pod \"dnsmasq-dns-86db49b7ff-ms5mm\" (UID: \"4b2dbc83-52f0-425a-955f-795d78314254\") " pod="openstack/dnsmasq-dns-86db49b7ff-ms5mm" Nov 28 11:26:46 crc kubenswrapper[4923]: I1128 11:26:46.227251 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4b2dbc83-52f0-425a-955f-795d78314254-dns-svc\") pod \"dnsmasq-dns-86db49b7ff-ms5mm\" (UID: \"4b2dbc83-52f0-425a-955f-795d78314254\") " pod="openstack/dnsmasq-dns-86db49b7ff-ms5mm" Nov 28 11:26:46 crc kubenswrapper[4923]: I1128 11:26:46.227332 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jv7pt\" (UniqueName: \"kubernetes.io/projected/4b2dbc83-52f0-425a-955f-795d78314254-kube-api-access-jv7pt\") pod \"dnsmasq-dns-86db49b7ff-ms5mm\" (UID: \"4b2dbc83-52f0-425a-955f-795d78314254\") " pod="openstack/dnsmasq-dns-86db49b7ff-ms5mm" Nov 28 11:26:46 crc kubenswrapper[4923]: I1128 11:26:46.227350 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4b2dbc83-52f0-425a-955f-795d78314254-ovsdbserver-sb\") pod \"dnsmasq-dns-86db49b7ff-ms5mm\" (UID: \"4b2dbc83-52f0-425a-955f-795d78314254\") " pod="openstack/dnsmasq-dns-86db49b7ff-ms5mm" Nov 28 11:26:46 crc kubenswrapper[4923]: I1128 11:26:46.227376 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4b2dbc83-52f0-425a-955f-795d78314254-config\") pod \"dnsmasq-dns-86db49b7ff-ms5mm\" (UID: \"4b2dbc83-52f0-425a-955f-795d78314254\") " pod="openstack/dnsmasq-dns-86db49b7ff-ms5mm" Nov 28 11:26:46 crc kubenswrapper[4923]: I1128 11:26:46.227832 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: 
\"kubernetes.io/configmap/4b2dbc83-52f0-425a-955f-795d78314254-ovsdbserver-nb\") pod \"dnsmasq-dns-86db49b7ff-ms5mm\" (UID: \"4b2dbc83-52f0-425a-955f-795d78314254\") " pod="openstack/dnsmasq-dns-86db49b7ff-ms5mm" Nov 28 11:26:46 crc kubenswrapper[4923]: I1128 11:26:46.228109 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4b2dbc83-52f0-425a-955f-795d78314254-config\") pod \"dnsmasq-dns-86db49b7ff-ms5mm\" (UID: \"4b2dbc83-52f0-425a-955f-795d78314254\") " pod="openstack/dnsmasq-dns-86db49b7ff-ms5mm" Nov 28 11:26:46 crc kubenswrapper[4923]: I1128 11:26:46.228586 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4b2dbc83-52f0-425a-955f-795d78314254-ovsdbserver-sb\") pod \"dnsmasq-dns-86db49b7ff-ms5mm\" (UID: \"4b2dbc83-52f0-425a-955f-795d78314254\") " pod="openstack/dnsmasq-dns-86db49b7ff-ms5mm" Nov 28 11:26:46 crc kubenswrapper[4923]: I1128 11:26:46.229058 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4b2dbc83-52f0-425a-955f-795d78314254-dns-svc\") pod \"dnsmasq-dns-86db49b7ff-ms5mm\" (UID: \"4b2dbc83-52f0-425a-955f-795d78314254\") " pod="openstack/dnsmasq-dns-86db49b7ff-ms5mm" Nov 28 11:26:46 crc kubenswrapper[4923]: I1128 11:26:46.248190 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jv7pt\" (UniqueName: \"kubernetes.io/projected/4b2dbc83-52f0-425a-955f-795d78314254-kube-api-access-jv7pt\") pod \"dnsmasq-dns-86db49b7ff-ms5mm\" (UID: \"4b2dbc83-52f0-425a-955f-795d78314254\") " pod="openstack/dnsmasq-dns-86db49b7ff-ms5mm" Nov 28 11:26:46 crc kubenswrapper[4923]: I1128 11:26:46.252162 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0" Nov 28 11:26:46 crc kubenswrapper[4923]: I1128 11:26:46.287312 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-ms5mm" Nov 28 11:26:46 crc kubenswrapper[4923]: I1128 11:26:46.331329 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0" Nov 28 11:26:46 crc kubenswrapper[4923]: I1128 11:26:46.393433 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7fd796d7df-rlnpt"] Nov 28 11:26:46 crc kubenswrapper[4923]: I1128 11:26:46.468233 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-8vd6b"] Nov 28 11:26:46 crc kubenswrapper[4923]: I1128 11:26:46.678164 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-ms5mm"] Nov 28 11:26:47 crc kubenswrapper[4923]: I1128 11:26:47.324340 4923 generic.go:334] "Generic (PLEG): container finished" podID="4b2dbc83-52f0-425a-955f-795d78314254" containerID="8e710f915bff9b0cdfdca645aba662df3e60674b9055af08cf04188a2f87d620" exitCode=0 Nov 28 11:26:47 crc kubenswrapper[4923]: I1128 11:26:47.324686 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-ms5mm" event={"ID":"4b2dbc83-52f0-425a-955f-795d78314254","Type":"ContainerDied","Data":"8e710f915bff9b0cdfdca645aba662df3e60674b9055af08cf04188a2f87d620"} Nov 28 11:26:47 crc kubenswrapper[4923]: I1128 11:26:47.324709 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-ms5mm" event={"ID":"4b2dbc83-52f0-425a-955f-795d78314254","Type":"ContainerStarted","Data":"340e3aaa369354bb03761b1e46c7892809e79a31c25b9939cd05a236dfffc9d8"} Nov 28 11:26:47 crc kubenswrapper[4923]: I1128 11:26:47.327354 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-8vd6b" event={"ID":"5bb6b6a6-27ae-4724-bd56-4a8f3891595d","Type":"ContainerStarted","Data":"d57af062d7172a772764027b38a4507f7d9a47b69cdf4a93d8ad4bd5bb4f07c4"} Nov 28 11:26:47 crc kubenswrapper[4923]: I1128 11:26:47.327376 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-8vd6b" event={"ID":"5bb6b6a6-27ae-4724-bd56-4a8f3891595d","Type":"ContainerStarted","Data":"20af6431b65c4049783475953d7f9f6571c7dbefc0060c74b0d47bf99b8c6e39"} Nov 28 11:26:47 crc kubenswrapper[4923]: I1128 11:26:47.329497 4923 generic.go:334] "Generic (PLEG): container finished" podID="2909a8f5-2cbf-42e7-b43c-699344dca6bc" containerID="4fefde10c9b19d04f0ccc3161972eb73a8c0a4883ca4e96bbd1863b1d55e58b6" exitCode=0 Nov 28 11:26:47 crc kubenswrapper[4923]: I1128 11:26:47.330121 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7fd796d7df-rlnpt" event={"ID":"2909a8f5-2cbf-42e7-b43c-699344dca6bc","Type":"ContainerDied","Data":"4fefde10c9b19d04f0ccc3161972eb73a8c0a4883ca4e96bbd1863b1d55e58b6"} Nov 28 11:26:47 crc kubenswrapper[4923]: I1128 11:26:47.330161 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5ccc8479f9-shqft" podUID="69a9b4ec-46dc-427a-9398-658ec781a88c" containerName="dnsmasq-dns" containerID="cri-o://006d02cb2fa96661d78770be71569839915c2300da1336064c23e99243cd9960" gracePeriod=10 Nov 28 11:26:47 crc kubenswrapper[4923]: I1128 11:26:47.330178 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7fd796d7df-rlnpt" event={"ID":"2909a8f5-2cbf-42e7-b43c-699344dca6bc","Type":"ContainerStarted","Data":"29df96790adacb7ab61c54b73a9c7b9f3e2cac148adbc4bc1fe05e72c06f4bc8"} Nov 28 11:26:47 crc kubenswrapper[4923]: I1128 11:26:47.330619 4923 
kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-57d769cc4f-z5rmn" podUID="d4882651-e2b0-4ab5-911f-3f1755c56d18" containerName="dnsmasq-dns" containerID="cri-o://c292ca20290ab83c6b4e7fc01e9085ba9f72393e37cd515046a85867e4b822ef" gracePeriod=10 Nov 28 11:26:47 crc kubenswrapper[4923]: I1128 11:26:47.390583 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0" Nov 28 11:26:47 crc kubenswrapper[4923]: I1128 11:26:47.448241 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-8vd6b" podStartSLOduration=2.448204422 podStartE2EDuration="2.448204422s" podCreationTimestamp="2025-11-28 11:26:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:26:47.442660596 +0000 UTC m=+1086.571344806" watchObservedRunningTime="2025-11-28 11:26:47.448204422 +0000 UTC m=+1086.576888632" Nov 28 11:26:47 crc kubenswrapper[4923]: E1128 11:26:47.594492 4923 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd4882651_e2b0_4ab5_911f_3f1755c56d18.slice/crio-c292ca20290ab83c6b4e7fc01e9085ba9f72393e37cd515046a85867e4b822ef.scope\": RecentStats: unable to find data in memory cache]" Nov 28 11:26:47 crc kubenswrapper[4923]: I1128 11:26:47.746532 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"] Nov 28 11:26:47 crc kubenswrapper[4923]: I1128 11:26:47.748287 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Nov 28 11:26:47 crc kubenswrapper[4923]: I1128 11:26:47.753184 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs" Nov 28 11:26:47 crc kubenswrapper[4923]: I1128 11:26:47.754202 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-p47lq" Nov 28 11:26:47 crc kubenswrapper[4923]: I1128 11:26:47.754533 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config" Nov 28 11:26:47 crc kubenswrapper[4923]: I1128 11:26:47.756516 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts" Nov 28 11:26:47 crc kubenswrapper[4923]: I1128 11:26:47.781757 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Nov 28 11:26:47 crc kubenswrapper[4923]: I1128 11:26:47.882563 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k68qz\" (UniqueName: \"kubernetes.io/projected/c667717f-96df-453c-af9c-01743e6ec4e2-kube-api-access-k68qz\") pod \"ovn-northd-0\" (UID: \"c667717f-96df-453c-af9c-01743e6ec4e2\") " pod="openstack/ovn-northd-0" Nov 28 11:26:47 crc kubenswrapper[4923]: I1128 11:26:47.882628 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c667717f-96df-453c-af9c-01743e6ec4e2-config\") pod \"ovn-northd-0\" (UID: \"c667717f-96df-453c-af9c-01743e6ec4e2\") " pod="openstack/ovn-northd-0" Nov 28 11:26:47 crc kubenswrapper[4923]: I1128 11:26:47.882851 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: 
\"kubernetes.io/empty-dir/c667717f-96df-453c-af9c-01743e6ec4e2-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"c667717f-96df-453c-af9c-01743e6ec4e2\") " pod="openstack/ovn-northd-0" Nov 28 11:26:47 crc kubenswrapper[4923]: I1128 11:26:47.882881 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/c667717f-96df-453c-af9c-01743e6ec4e2-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"c667717f-96df-453c-af9c-01743e6ec4e2\") " pod="openstack/ovn-northd-0" Nov 28 11:26:47 crc kubenswrapper[4923]: I1128 11:26:47.882903 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c667717f-96df-453c-af9c-01743e6ec4e2-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"c667717f-96df-453c-af9c-01743e6ec4e2\") " pod="openstack/ovn-northd-0" Nov 28 11:26:47 crc kubenswrapper[4923]: I1128 11:26:47.882921 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/c667717f-96df-453c-af9c-01743e6ec4e2-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"c667717f-96df-453c-af9c-01743e6ec4e2\") " pod="openstack/ovn-northd-0" Nov 28 11:26:47 crc kubenswrapper[4923]: I1128 11:26:47.883020 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c667717f-96df-453c-af9c-01743e6ec4e2-scripts\") pod \"ovn-northd-0\" (UID: \"c667717f-96df-453c-af9c-01743e6ec4e2\") " pod="openstack/ovn-northd-0" Nov 28 11:26:47 crc kubenswrapper[4923]: I1128 11:26:47.910365 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-z5rmn" Nov 28 11:26:47 crc kubenswrapper[4923]: I1128 11:26:47.986359 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d4882651-e2b0-4ab5-911f-3f1755c56d18-config\") pod \"d4882651-e2b0-4ab5-911f-3f1755c56d18\" (UID: \"d4882651-e2b0-4ab5-911f-3f1755c56d18\") " Nov 28 11:26:47 crc kubenswrapper[4923]: I1128 11:26:47.987157 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d4882651-e2b0-4ab5-911f-3f1755c56d18-dns-svc\") pod \"d4882651-e2b0-4ab5-911f-3f1755c56d18\" (UID: \"d4882651-e2b0-4ab5-911f-3f1755c56d18\") " Nov 28 11:26:47 crc kubenswrapper[4923]: I1128 11:26:47.987357 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n2snr\" (UniqueName: \"kubernetes.io/projected/d4882651-e2b0-4ab5-911f-3f1755c56d18-kube-api-access-n2snr\") pod \"d4882651-e2b0-4ab5-911f-3f1755c56d18\" (UID: \"d4882651-e2b0-4ab5-911f-3f1755c56d18\") " Nov 28 11:26:47 crc kubenswrapper[4923]: I1128 11:26:47.987625 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c667717f-96df-453c-af9c-01743e6ec4e2-scripts\") pod \"ovn-northd-0\" (UID: \"c667717f-96df-453c-af9c-01743e6ec4e2\") " pod="openstack/ovn-northd-0" Nov 28 11:26:47 crc kubenswrapper[4923]: I1128 11:26:47.987716 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k68qz\" (UniqueName: \"kubernetes.io/projected/c667717f-96df-453c-af9c-01743e6ec4e2-kube-api-access-k68qz\") pod \"ovn-northd-0\" (UID: \"c667717f-96df-453c-af9c-01743e6ec4e2\") " pod="openstack/ovn-northd-0" Nov 28 11:26:47 crc kubenswrapper[4923]: I1128 11:26:47.987819 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c667717f-96df-453c-af9c-01743e6ec4e2-config\") pod \"ovn-northd-0\" (UID: \"c667717f-96df-453c-af9c-01743e6ec4e2\") " pod="openstack/ovn-northd-0" Nov 28 11:26:47 crc kubenswrapper[4923]: I1128 11:26:47.987893 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/c667717f-96df-453c-af9c-01743e6ec4e2-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"c667717f-96df-453c-af9c-01743e6ec4e2\") " pod="openstack/ovn-northd-0" Nov 28 11:26:47 crc kubenswrapper[4923]: I1128 11:26:47.988022 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/c667717f-96df-453c-af9c-01743e6ec4e2-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"c667717f-96df-453c-af9c-01743e6ec4e2\") " pod="openstack/ovn-northd-0" Nov 28 11:26:47 crc kubenswrapper[4923]: I1128 11:26:47.988854 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c667717f-96df-453c-af9c-01743e6ec4e2-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"c667717f-96df-453c-af9c-01743e6ec4e2\") " pod="openstack/ovn-northd-0" Nov 28 11:26:47 crc kubenswrapper[4923]: I1128 11:26:47.989062 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/c667717f-96df-453c-af9c-01743e6ec4e2-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: 
\"c667717f-96df-453c-af9c-01743e6ec4e2\") " pod="openstack/ovn-northd-0" Nov 28 11:26:47 crc kubenswrapper[4923]: I1128 11:26:47.989382 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c667717f-96df-453c-af9c-01743e6ec4e2-config\") pod \"ovn-northd-0\" (UID: \"c667717f-96df-453c-af9c-01743e6ec4e2\") " pod="openstack/ovn-northd-0" Nov 28 11:26:47 crc kubenswrapper[4923]: I1128 11:26:47.990233 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c667717f-96df-453c-af9c-01743e6ec4e2-scripts\") pod \"ovn-northd-0\" (UID: \"c667717f-96df-453c-af9c-01743e6ec4e2\") " pod="openstack/ovn-northd-0" Nov 28 11:26:47 crc kubenswrapper[4923]: I1128 11:26:47.990945 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/c667717f-96df-453c-af9c-01743e6ec4e2-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"c667717f-96df-453c-af9c-01743e6ec4e2\") " pod="openstack/ovn-northd-0" Nov 28 11:26:48 crc kubenswrapper[4923]: I1128 11:26:48.006153 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/c667717f-96df-453c-af9c-01743e6ec4e2-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"c667717f-96df-453c-af9c-01743e6ec4e2\") " pod="openstack/ovn-northd-0" Nov 28 11:26:48 crc kubenswrapper[4923]: I1128 11:26:48.011024 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d4882651-e2b0-4ab5-911f-3f1755c56d18-kube-api-access-n2snr" (OuterVolumeSpecName: "kube-api-access-n2snr") pod "d4882651-e2b0-4ab5-911f-3f1755c56d18" (UID: "d4882651-e2b0-4ab5-911f-3f1755c56d18"). InnerVolumeSpecName "kube-api-access-n2snr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:26:48 crc kubenswrapper[4923]: I1128 11:26:48.012126 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/c667717f-96df-453c-af9c-01743e6ec4e2-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"c667717f-96df-453c-af9c-01743e6ec4e2\") " pod="openstack/ovn-northd-0" Nov 28 11:26:48 crc kubenswrapper[4923]: I1128 11:26:48.024389 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c667717f-96df-453c-af9c-01743e6ec4e2-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"c667717f-96df-453c-af9c-01743e6ec4e2\") " pod="openstack/ovn-northd-0" Nov 28 11:26:48 crc kubenswrapper[4923]: I1128 11:26:48.041836 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k68qz\" (UniqueName: \"kubernetes.io/projected/c667717f-96df-453c-af9c-01743e6ec4e2-kube-api-access-k68qz\") pod \"ovn-northd-0\" (UID: \"c667717f-96df-453c-af9c-01743e6ec4e2\") " pod="openstack/ovn-northd-0" Nov 28 11:26:48 crc kubenswrapper[4923]: I1128 11:26:48.049039 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d4882651-e2b0-4ab5-911f-3f1755c56d18-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "d4882651-e2b0-4ab5-911f-3f1755c56d18" (UID: "d4882651-e2b0-4ab5-911f-3f1755c56d18"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:26:48 crc kubenswrapper[4923]: I1128 11:26:48.087075 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-northd-0" Nov 28 11:26:48 crc kubenswrapper[4923]: I1128 11:26:48.090138 4923 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d4882651-e2b0-4ab5-911f-3f1755c56d18-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 11:26:48 crc kubenswrapper[4923]: I1128 11:26:48.090161 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n2snr\" (UniqueName: \"kubernetes.io/projected/d4882651-e2b0-4ab5-911f-3f1755c56d18-kube-api-access-n2snr\") on node \"crc\" DevicePath \"\"" Nov 28 11:26:48 crc kubenswrapper[4923]: I1128 11:26:48.101414 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d4882651-e2b0-4ab5-911f-3f1755c56d18-config" (OuterVolumeSpecName: "config") pod "d4882651-e2b0-4ab5-911f-3f1755c56d18" (UID: "d4882651-e2b0-4ab5-911f-3f1755c56d18"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:26:48 crc kubenswrapper[4923]: I1128 11:26:48.173483 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5ccc8479f9-shqft" Nov 28 11:26:48 crc kubenswrapper[4923]: I1128 11:26:48.196996 4923 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d4882651-e2b0-4ab5-911f-3f1755c56d18-config\") on node \"crc\" DevicePath \"\"" Nov 28 11:26:48 crc kubenswrapper[4923]: I1128 11:26:48.298583 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l9fs9\" (UniqueName: \"kubernetes.io/projected/69a9b4ec-46dc-427a-9398-658ec781a88c-kube-api-access-l9fs9\") pod \"69a9b4ec-46dc-427a-9398-658ec781a88c\" (UID: \"69a9b4ec-46dc-427a-9398-658ec781a88c\") " Nov 28 11:26:48 crc kubenswrapper[4923]: I1128 11:26:48.298986 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/69a9b4ec-46dc-427a-9398-658ec781a88c-dns-svc\") pod \"69a9b4ec-46dc-427a-9398-658ec781a88c\" (UID: \"69a9b4ec-46dc-427a-9398-658ec781a88c\") " Nov 28 11:26:48 crc kubenswrapper[4923]: I1128 11:26:48.299072 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/69a9b4ec-46dc-427a-9398-658ec781a88c-config\") pod \"69a9b4ec-46dc-427a-9398-658ec781a88c\" (UID: \"69a9b4ec-46dc-427a-9398-658ec781a88c\") " Nov 28 11:26:48 crc kubenswrapper[4923]: I1128 11:26:48.309108 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/69a9b4ec-46dc-427a-9398-658ec781a88c-kube-api-access-l9fs9" (OuterVolumeSpecName: "kube-api-access-l9fs9") pod "69a9b4ec-46dc-427a-9398-658ec781a88c" (UID: "69a9b4ec-46dc-427a-9398-658ec781a88c"). InnerVolumeSpecName "kube-api-access-l9fs9". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:26:48 crc kubenswrapper[4923]: I1128 11:26:48.339705 4923 generic.go:334] "Generic (PLEG): container finished" podID="69a9b4ec-46dc-427a-9398-658ec781a88c" containerID="006d02cb2fa96661d78770be71569839915c2300da1336064c23e99243cd9960" exitCode=0 Nov 28 11:26:48 crc kubenswrapper[4923]: I1128 11:26:48.339779 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5ccc8479f9-shqft" event={"ID":"69a9b4ec-46dc-427a-9398-658ec781a88c","Type":"ContainerDied","Data":"006d02cb2fa96661d78770be71569839915c2300da1336064c23e99243cd9960"} Nov 28 11:26:48 crc kubenswrapper[4923]: I1128 11:26:48.339808 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5ccc8479f9-shqft" event={"ID":"69a9b4ec-46dc-427a-9398-658ec781a88c","Type":"ContainerDied","Data":"b1185c328e52ddb168714df20b1dd82f7d105662a66a5efbc5e9cbb4acf13f17"} Nov 28 11:26:48 crc kubenswrapper[4923]: I1128 11:26:48.339826 4923 scope.go:117] "RemoveContainer" containerID="006d02cb2fa96661d78770be71569839915c2300da1336064c23e99243cd9960" Nov 28 11:26:48 crc kubenswrapper[4923]: I1128 11:26:48.340056 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5ccc8479f9-shqft" Nov 28 11:26:48 crc kubenswrapper[4923]: I1128 11:26:48.342404 4923 generic.go:334] "Generic (PLEG): container finished" podID="d4882651-e2b0-4ab5-911f-3f1755c56d18" containerID="c292ca20290ab83c6b4e7fc01e9085ba9f72393e37cd515046a85867e4b822ef" exitCode=0 Nov 28 11:26:48 crc kubenswrapper[4923]: I1128 11:26:48.342485 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-z5rmn" event={"ID":"d4882651-e2b0-4ab5-911f-3f1755c56d18","Type":"ContainerDied","Data":"c292ca20290ab83c6b4e7fc01e9085ba9f72393e37cd515046a85867e4b822ef"} Nov 28 11:26:48 crc kubenswrapper[4923]: I1128 11:26:48.342505 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-z5rmn" Nov 28 11:26:48 crc kubenswrapper[4923]: I1128 11:26:48.342513 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-z5rmn" event={"ID":"d4882651-e2b0-4ab5-911f-3f1755c56d18","Type":"ContainerDied","Data":"cdf7b68abfee4906f81a2f2139bede7f21cb1f10cfbd0cd4fe58a286d52f798f"} Nov 28 11:26:48 crc kubenswrapper[4923]: I1128 11:26:48.344775 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7fd796d7df-rlnpt" event={"ID":"2909a8f5-2cbf-42e7-b43c-699344dca6bc","Type":"ContainerStarted","Data":"69397d9ba20e7bc1f0ae5f496d4ead474d5a86c821b8c396d901729b00f8464e"} Nov 28 11:26:48 crc kubenswrapper[4923]: I1128 11:26:48.344921 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7fd796d7df-rlnpt" Nov 28 11:26:48 crc kubenswrapper[4923]: I1128 11:26:48.347239 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/69a9b4ec-46dc-427a-9398-658ec781a88c-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "69a9b4ec-46dc-427a-9398-658ec781a88c" (UID: "69a9b4ec-46dc-427a-9398-658ec781a88c"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:26:48 crc kubenswrapper[4923]: I1128 11:26:48.347650 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-ms5mm" event={"ID":"4b2dbc83-52f0-425a-955f-795d78314254","Type":"ContainerStarted","Data":"e63afc1c95653740ecf9248d90d0fc1bc2e2f6dc858f6f30b7d0d888e9ff99b1"} Nov 28 11:26:48 crc kubenswrapper[4923]: I1128 11:26:48.348592 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-86db49b7ff-ms5mm" Nov 28 11:26:48 crc kubenswrapper[4923]: I1128 11:26:48.361667 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/69a9b4ec-46dc-427a-9398-658ec781a88c-config" (OuterVolumeSpecName: "config") pod "69a9b4ec-46dc-427a-9398-658ec781a88c" (UID: "69a9b4ec-46dc-427a-9398-658ec781a88c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:26:48 crc kubenswrapper[4923]: I1128 11:26:48.375337 4923 scope.go:117] "RemoveContainer" containerID="d27c2a2ea3a5a51f59451051f71cf1410748e117ce728726e6cabe31c76e03f9" Nov 28 11:26:48 crc kubenswrapper[4923]: I1128 11:26:48.380890 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7fd796d7df-rlnpt" podStartSLOduration=3.380867849 podStartE2EDuration="3.380867849s" podCreationTimestamp="2025-11-28 11:26:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:26:48.376057824 +0000 UTC m=+1087.504742034" watchObservedRunningTime="2025-11-28 11:26:48.380867849 +0000 UTC m=+1087.509552059" Nov 28 11:26:48 crc kubenswrapper[4923]: I1128 11:26:48.406040 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-86db49b7ff-ms5mm" podStartSLOduration=3.406022547 podStartE2EDuration="3.406022547s" podCreationTimestamp="2025-11-28 11:26:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:26:48.395058228 +0000 UTC m=+1087.523742438" watchObservedRunningTime="2025-11-28 11:26:48.406022547 +0000 UTC m=+1087.534706757" Nov 28 11:26:48 crc kubenswrapper[4923]: I1128 11:26:48.406828 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-z5rmn"] Nov 28 11:26:48 crc kubenswrapper[4923]: I1128 11:26:48.406973 4923 scope.go:117] "RemoveContainer" containerID="006d02cb2fa96661d78770be71569839915c2300da1336064c23e99243cd9960" Nov 28 11:26:48 crc kubenswrapper[4923]: I1128 11:26:48.407272 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l9fs9\" (UniqueName: \"kubernetes.io/projected/69a9b4ec-46dc-427a-9398-658ec781a88c-kube-api-access-l9fs9\") on node \"crc\" DevicePath \"\"" Nov 28 11:26:48 crc kubenswrapper[4923]: I1128 11:26:48.407292 4923 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/69a9b4ec-46dc-427a-9398-658ec781a88c-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 11:26:48 crc kubenswrapper[4923]: I1128 11:26:48.407301 4923 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/69a9b4ec-46dc-427a-9398-658ec781a88c-config\") on node \"crc\" DevicePath \"\"" Nov 28 11:26:48 crc kubenswrapper[4923]: E1128 11:26:48.411221 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc 
error: code = NotFound desc = could not find container \"006d02cb2fa96661d78770be71569839915c2300da1336064c23e99243cd9960\": container with ID starting with 006d02cb2fa96661d78770be71569839915c2300da1336064c23e99243cd9960 not found: ID does not exist" containerID="006d02cb2fa96661d78770be71569839915c2300da1336064c23e99243cd9960" Nov 28 11:26:48 crc kubenswrapper[4923]: I1128 11:26:48.411257 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"006d02cb2fa96661d78770be71569839915c2300da1336064c23e99243cd9960"} err="failed to get container status \"006d02cb2fa96661d78770be71569839915c2300da1336064c23e99243cd9960\": rpc error: code = NotFound desc = could not find container \"006d02cb2fa96661d78770be71569839915c2300da1336064c23e99243cd9960\": container with ID starting with 006d02cb2fa96661d78770be71569839915c2300da1336064c23e99243cd9960 not found: ID does not exist" Nov 28 11:26:48 crc kubenswrapper[4923]: I1128 11:26:48.411282 4923 scope.go:117] "RemoveContainer" containerID="d27c2a2ea3a5a51f59451051f71cf1410748e117ce728726e6cabe31c76e03f9" Nov 28 11:26:48 crc kubenswrapper[4923]: E1128 11:26:48.411619 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d27c2a2ea3a5a51f59451051f71cf1410748e117ce728726e6cabe31c76e03f9\": container with ID starting with d27c2a2ea3a5a51f59451051f71cf1410748e117ce728726e6cabe31c76e03f9 not found: ID does not exist" containerID="d27c2a2ea3a5a51f59451051f71cf1410748e117ce728726e6cabe31c76e03f9" Nov 28 11:26:48 crc kubenswrapper[4923]: I1128 11:26:48.411642 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d27c2a2ea3a5a51f59451051f71cf1410748e117ce728726e6cabe31c76e03f9"} err="failed to get container status \"d27c2a2ea3a5a51f59451051f71cf1410748e117ce728726e6cabe31c76e03f9\": rpc error: code = NotFound desc = could not find container \"d27c2a2ea3a5a51f59451051f71cf1410748e117ce728726e6cabe31c76e03f9\": container with ID starting with d27c2a2ea3a5a51f59451051f71cf1410748e117ce728726e6cabe31c76e03f9 not found: ID does not exist" Nov 28 11:26:48 crc kubenswrapper[4923]: I1128 11:26:48.411655 4923 scope.go:117] "RemoveContainer" containerID="c292ca20290ab83c6b4e7fc01e9085ba9f72393e37cd515046a85867e4b822ef" Nov 28 11:26:48 crc kubenswrapper[4923]: I1128 11:26:48.414216 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-z5rmn"] Nov 28 11:26:48 crc kubenswrapper[4923]: I1128 11:26:48.431912 4923 scope.go:117] "RemoveContainer" containerID="05df6b674f932ea03cf7a87795478a6a29700e564006f5a275fc379534bc7239" Nov 28 11:26:48 crc kubenswrapper[4923]: I1128 11:26:48.448712 4923 scope.go:117] "RemoveContainer" containerID="c292ca20290ab83c6b4e7fc01e9085ba9f72393e37cd515046a85867e4b822ef" Nov 28 11:26:48 crc kubenswrapper[4923]: E1128 11:26:48.449111 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c292ca20290ab83c6b4e7fc01e9085ba9f72393e37cd515046a85867e4b822ef\": container with ID starting with c292ca20290ab83c6b4e7fc01e9085ba9f72393e37cd515046a85867e4b822ef not found: ID does not exist" containerID="c292ca20290ab83c6b4e7fc01e9085ba9f72393e37cd515046a85867e4b822ef" Nov 28 11:26:48 crc kubenswrapper[4923]: I1128 11:26:48.449168 4923 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"c292ca20290ab83c6b4e7fc01e9085ba9f72393e37cd515046a85867e4b822ef"} err="failed to get container status \"c292ca20290ab83c6b4e7fc01e9085ba9f72393e37cd515046a85867e4b822ef\": rpc error: code = NotFound desc = could not find container \"c292ca20290ab83c6b4e7fc01e9085ba9f72393e37cd515046a85867e4b822ef\": container with ID starting with c292ca20290ab83c6b4e7fc01e9085ba9f72393e37cd515046a85867e4b822ef not found: ID does not exist" Nov 28 11:26:48 crc kubenswrapper[4923]: I1128 11:26:48.449204 4923 scope.go:117] "RemoveContainer" containerID="05df6b674f932ea03cf7a87795478a6a29700e564006f5a275fc379534bc7239" Nov 28 11:26:48 crc kubenswrapper[4923]: E1128 11:26:48.449620 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"05df6b674f932ea03cf7a87795478a6a29700e564006f5a275fc379534bc7239\": container with ID starting with 05df6b674f932ea03cf7a87795478a6a29700e564006f5a275fc379534bc7239 not found: ID does not exist" containerID="05df6b674f932ea03cf7a87795478a6a29700e564006f5a275fc379534bc7239" Nov 28 11:26:48 crc kubenswrapper[4923]: I1128 11:26:48.449659 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"05df6b674f932ea03cf7a87795478a6a29700e564006f5a275fc379534bc7239"} err="failed to get container status \"05df6b674f932ea03cf7a87795478a6a29700e564006f5a275fc379534bc7239\": rpc error: code = NotFound desc = could not find container \"05df6b674f932ea03cf7a87795478a6a29700e564006f5a275fc379534bc7239\": container with ID starting with 05df6b674f932ea03cf7a87795478a6a29700e564006f5a275fc379534bc7239 not found: ID does not exist" Nov 28 11:26:48 crc kubenswrapper[4923]: I1128 11:26:48.554540 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Nov 28 11:26:48 crc kubenswrapper[4923]: I1128 11:26:48.668826 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5ccc8479f9-shqft"] Nov 28 11:26:48 crc kubenswrapper[4923]: I1128 11:26:48.674703 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5ccc8479f9-shqft"] Nov 28 11:26:49 crc kubenswrapper[4923]: I1128 11:26:49.182725 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="69a9b4ec-46dc-427a-9398-658ec781a88c" path="/var/lib/kubelet/pods/69a9b4ec-46dc-427a-9398-658ec781a88c/volumes" Nov 28 11:26:49 crc kubenswrapper[4923]: I1128 11:26:49.183526 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d4882651-e2b0-4ab5-911f-3f1755c56d18" path="/var/lib/kubelet/pods/d4882651-e2b0-4ab5-911f-3f1755c56d18/volumes" Nov 28 11:26:49 crc kubenswrapper[4923]: I1128 11:26:49.357477 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"c667717f-96df-453c-af9c-01743e6ec4e2","Type":"ContainerStarted","Data":"faa4d4834ebc3547686acc06244355a99595009c204b3eaef92791bf626ccd80"} Nov 28 11:26:49 crc kubenswrapper[4923]: I1128 11:26:49.474964 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0" Nov 28 11:26:49 crc kubenswrapper[4923]: I1128 11:26:49.475057 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0" Nov 28 11:26:50 crc kubenswrapper[4923]: I1128 11:26:50.366519 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" 
event={"ID":"c667717f-96df-453c-af9c-01743e6ec4e2","Type":"ContainerStarted","Data":"0c173106bba513fa3c418e92e25d47f624369031eb06dcaae371dd8b86b083b5"} Nov 28 11:26:50 crc kubenswrapper[4923]: I1128 11:26:50.366889 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"c667717f-96df-453c-af9c-01743e6ec4e2","Type":"ContainerStarted","Data":"8f0ae8cde67ba407a5c68ec1736a4856204e6f2094471ddc65957b1a4689c90f"} Nov 28 11:26:50 crc kubenswrapper[4923]: I1128 11:26:50.366921 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0" Nov 28 11:26:50 crc kubenswrapper[4923]: I1128 11:26:50.396342 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=2.175985716 podStartE2EDuration="3.396325788s" podCreationTimestamp="2025-11-28 11:26:47 +0000 UTC" firstStartedPulling="2025-11-28 11:26:48.571202186 +0000 UTC m=+1087.699886386" lastFinishedPulling="2025-11-28 11:26:49.791542248 +0000 UTC m=+1088.920226458" observedRunningTime="2025-11-28 11:26:50.391883443 +0000 UTC m=+1089.520567663" watchObservedRunningTime="2025-11-28 11:26:50.396325788 +0000 UTC m=+1089.525010008" Nov 28 11:26:50 crc kubenswrapper[4923]: I1128 11:26:50.878362 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0" Nov 28 11:26:50 crc kubenswrapper[4923]: I1128 11:26:50.878408 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0" Nov 28 11:26:50 crc kubenswrapper[4923]: I1128 11:26:50.994394 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0" Nov 28 11:26:51 crc kubenswrapper[4923]: I1128 11:26:51.490360 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0" Nov 28 11:26:51 crc kubenswrapper[4923]: I1128 11:26:51.739991 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0" Nov 28 11:26:51 crc kubenswrapper[4923]: I1128 11:26:51.827280 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0" Nov 28 11:26:52 crc kubenswrapper[4923]: I1128 11:26:52.940710 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Nov 28 11:26:56 crc kubenswrapper[4923]: I1128 11:26:56.014062 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-7fd796d7df-rlnpt" Nov 28 11:26:56 crc kubenswrapper[4923]: I1128 11:26:56.289969 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-86db49b7ff-ms5mm" Nov 28 11:26:56 crc kubenswrapper[4923]: I1128 11:26:56.365389 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-13ae-account-create-update-5xj8w"] Nov 28 11:26:56 crc kubenswrapper[4923]: E1128 11:26:56.365769 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="69a9b4ec-46dc-427a-9398-658ec781a88c" containerName="dnsmasq-dns" Nov 28 11:26:56 crc kubenswrapper[4923]: I1128 11:26:56.365785 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="69a9b4ec-46dc-427a-9398-658ec781a88c" containerName="dnsmasq-dns" Nov 28 11:26:56 crc kubenswrapper[4923]: E1128 11:26:56.365805 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d4882651-e2b0-4ab5-911f-3f1755c56d18" 
containerName="dnsmasq-dns" Nov 28 11:26:56 crc kubenswrapper[4923]: I1128 11:26:56.365813 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="d4882651-e2b0-4ab5-911f-3f1755c56d18" containerName="dnsmasq-dns" Nov 28 11:26:56 crc kubenswrapper[4923]: E1128 11:26:56.365822 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="69a9b4ec-46dc-427a-9398-658ec781a88c" containerName="init" Nov 28 11:26:56 crc kubenswrapper[4923]: I1128 11:26:56.365828 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="69a9b4ec-46dc-427a-9398-658ec781a88c" containerName="init" Nov 28 11:26:56 crc kubenswrapper[4923]: E1128 11:26:56.365853 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d4882651-e2b0-4ab5-911f-3f1755c56d18" containerName="init" Nov 28 11:26:56 crc kubenswrapper[4923]: I1128 11:26:56.365858 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="d4882651-e2b0-4ab5-911f-3f1755c56d18" containerName="init" Nov 28 11:26:56 crc kubenswrapper[4923]: I1128 11:26:56.366023 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="d4882651-e2b0-4ab5-911f-3f1755c56d18" containerName="dnsmasq-dns" Nov 28 11:26:56 crc kubenswrapper[4923]: I1128 11:26:56.366032 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="69a9b4ec-46dc-427a-9398-658ec781a88c" containerName="dnsmasq-dns" Nov 28 11:26:56 crc kubenswrapper[4923]: I1128 11:26:56.366549 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-13ae-account-create-update-5xj8w" Nov 28 11:26:56 crc kubenswrapper[4923]: I1128 11:26:56.374175 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret" Nov 28 11:26:56 crc kubenswrapper[4923]: I1128 11:26:56.375108 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-8dbh7"] Nov 28 11:26:56 crc kubenswrapper[4923]: I1128 11:26:56.376004 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-8dbh7" Nov 28 11:26:56 crc kubenswrapper[4923]: I1128 11:26:56.386091 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7fd796d7df-rlnpt"] Nov 28 11:26:56 crc kubenswrapper[4923]: I1128 11:26:56.397874 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-13ae-account-create-update-5xj8w"] Nov 28 11:26:56 crc kubenswrapper[4923]: I1128 11:26:56.405272 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-8dbh7"] Nov 28 11:26:56 crc kubenswrapper[4923]: I1128 11:26:56.416808 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7fd796d7df-rlnpt" podUID="2909a8f5-2cbf-42e7-b43c-699344dca6bc" containerName="dnsmasq-dns" containerID="cri-o://69397d9ba20e7bc1f0ae5f496d4ead474d5a86c821b8c396d901729b00f8464e" gracePeriod=10 Nov 28 11:26:56 crc kubenswrapper[4923]: I1128 11:26:56.484032 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4cjm9\" (UniqueName: \"kubernetes.io/projected/ebc90f13-4ffb-450c-a5ca-16053a5111d5-kube-api-access-4cjm9\") pod \"glance-13ae-account-create-update-5xj8w\" (UID: \"ebc90f13-4ffb-450c-a5ca-16053a5111d5\") " pod="openstack/glance-13ae-account-create-update-5xj8w" Nov 28 11:26:56 crc kubenswrapper[4923]: I1128 11:26:56.484140 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ebc90f13-4ffb-450c-a5ca-16053a5111d5-operator-scripts\") pod \"glance-13ae-account-create-update-5xj8w\" (UID: \"ebc90f13-4ffb-450c-a5ca-16053a5111d5\") " pod="openstack/glance-13ae-account-create-update-5xj8w" Nov 28 11:26:56 crc kubenswrapper[4923]: I1128 11:26:56.484179 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gd4n7\" (UniqueName: \"kubernetes.io/projected/c2a257ac-b03d-4c96-b2e5-bd306b02b489-kube-api-access-gd4n7\") pod \"glance-db-create-8dbh7\" (UID: \"c2a257ac-b03d-4c96-b2e5-bd306b02b489\") " pod="openstack/glance-db-create-8dbh7" Nov 28 11:26:56 crc kubenswrapper[4923]: I1128 11:26:56.484210 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c2a257ac-b03d-4c96-b2e5-bd306b02b489-operator-scripts\") pod \"glance-db-create-8dbh7\" (UID: \"c2a257ac-b03d-4c96-b2e5-bd306b02b489\") " pod="openstack/glance-db-create-8dbh7" Nov 28 11:26:56 crc kubenswrapper[4923]: I1128 11:26:56.585714 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ebc90f13-4ffb-450c-a5ca-16053a5111d5-operator-scripts\") pod \"glance-13ae-account-create-update-5xj8w\" (UID: \"ebc90f13-4ffb-450c-a5ca-16053a5111d5\") " pod="openstack/glance-13ae-account-create-update-5xj8w" Nov 28 11:26:56 crc kubenswrapper[4923]: I1128 11:26:56.585764 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gd4n7\" (UniqueName: \"kubernetes.io/projected/c2a257ac-b03d-4c96-b2e5-bd306b02b489-kube-api-access-gd4n7\") pod \"glance-db-create-8dbh7\" (UID: \"c2a257ac-b03d-4c96-b2e5-bd306b02b489\") " pod="openstack/glance-db-create-8dbh7" Nov 28 11:26:56 crc kubenswrapper[4923]: I1128 11:26:56.585787 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c2a257ac-b03d-4c96-b2e5-bd306b02b489-operator-scripts\") pod \"glance-db-create-8dbh7\" (UID: \"c2a257ac-b03d-4c96-b2e5-bd306b02b489\") " pod="openstack/glance-db-create-8dbh7" Nov 28 11:26:56 crc kubenswrapper[4923]: I1128 11:26:56.585844 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4cjm9\" (UniqueName: \"kubernetes.io/projected/ebc90f13-4ffb-450c-a5ca-16053a5111d5-kube-api-access-4cjm9\") pod \"glance-13ae-account-create-update-5xj8w\" (UID: \"ebc90f13-4ffb-450c-a5ca-16053a5111d5\") " pod="openstack/glance-13ae-account-create-update-5xj8w" Nov 28 11:26:56 crc kubenswrapper[4923]: I1128 11:26:56.586694 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ebc90f13-4ffb-450c-a5ca-16053a5111d5-operator-scripts\") pod \"glance-13ae-account-create-update-5xj8w\" (UID: \"ebc90f13-4ffb-450c-a5ca-16053a5111d5\") " pod="openstack/glance-13ae-account-create-update-5xj8w" Nov 28 11:26:56 crc kubenswrapper[4923]: I1128 11:26:56.587309 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c2a257ac-b03d-4c96-b2e5-bd306b02b489-operator-scripts\") pod \"glance-db-create-8dbh7\" (UID: \"c2a257ac-b03d-4c96-b2e5-bd306b02b489\") " pod="openstack/glance-db-create-8dbh7" Nov 28 11:26:56 crc kubenswrapper[4923]: I1128 11:26:56.605473 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4cjm9\" (UniqueName: \"kubernetes.io/projected/ebc90f13-4ffb-450c-a5ca-16053a5111d5-kube-api-access-4cjm9\") pod \"glance-13ae-account-create-update-5xj8w\" (UID: \"ebc90f13-4ffb-450c-a5ca-16053a5111d5\") " pod="openstack/glance-13ae-account-create-update-5xj8w" Nov 28 11:26:56 crc kubenswrapper[4923]: I1128 11:26:56.606504 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gd4n7\" (UniqueName: \"kubernetes.io/projected/c2a257ac-b03d-4c96-b2e5-bd306b02b489-kube-api-access-gd4n7\") pod \"glance-db-create-8dbh7\" (UID: \"c2a257ac-b03d-4c96-b2e5-bd306b02b489\") " pod="openstack/glance-db-create-8dbh7" Nov 28 11:26:56 crc kubenswrapper[4923]: I1128 11:26:56.687282 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-13ae-account-create-update-5xj8w" Nov 28 11:26:56 crc kubenswrapper[4923]: I1128 11:26:56.696226 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-8dbh7" Nov 28 11:26:56 crc kubenswrapper[4923]: I1128 11:26:56.925599 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7fd796d7df-rlnpt" Nov 28 11:26:56 crc kubenswrapper[4923]: I1128 11:26:56.991602 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xrgft\" (UniqueName: \"kubernetes.io/projected/2909a8f5-2cbf-42e7-b43c-699344dca6bc-kube-api-access-xrgft\") pod \"2909a8f5-2cbf-42e7-b43c-699344dca6bc\" (UID: \"2909a8f5-2cbf-42e7-b43c-699344dca6bc\") " Nov 28 11:26:56 crc kubenswrapper[4923]: I1128 11:26:56.991888 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2909a8f5-2cbf-42e7-b43c-699344dca6bc-dns-svc\") pod \"2909a8f5-2cbf-42e7-b43c-699344dca6bc\" (UID: \"2909a8f5-2cbf-42e7-b43c-699344dca6bc\") " Nov 28 11:26:56 crc kubenswrapper[4923]: I1128 11:26:56.991907 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2909a8f5-2cbf-42e7-b43c-699344dca6bc-ovsdbserver-nb\") pod \"2909a8f5-2cbf-42e7-b43c-699344dca6bc\" (UID: \"2909a8f5-2cbf-42e7-b43c-699344dca6bc\") " Nov 28 11:26:56 crc kubenswrapper[4923]: I1128 11:26:56.991994 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2909a8f5-2cbf-42e7-b43c-699344dca6bc-config\") pod \"2909a8f5-2cbf-42e7-b43c-699344dca6bc\" (UID: \"2909a8f5-2cbf-42e7-b43c-699344dca6bc\") " Nov 28 11:26:56 crc kubenswrapper[4923]: I1128 11:26:56.996513 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2909a8f5-2cbf-42e7-b43c-699344dca6bc-kube-api-access-xrgft" (OuterVolumeSpecName: "kube-api-access-xrgft") pod "2909a8f5-2cbf-42e7-b43c-699344dca6bc" (UID: "2909a8f5-2cbf-42e7-b43c-699344dca6bc"). InnerVolumeSpecName "kube-api-access-xrgft". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:26:57 crc kubenswrapper[4923]: I1128 11:26:57.040584 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2909a8f5-2cbf-42e7-b43c-699344dca6bc-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "2909a8f5-2cbf-42e7-b43c-699344dca6bc" (UID: "2909a8f5-2cbf-42e7-b43c-699344dca6bc"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:26:57 crc kubenswrapper[4923]: I1128 11:26:57.055727 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2909a8f5-2cbf-42e7-b43c-699344dca6bc-config" (OuterVolumeSpecName: "config") pod "2909a8f5-2cbf-42e7-b43c-699344dca6bc" (UID: "2909a8f5-2cbf-42e7-b43c-699344dca6bc"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:26:57 crc kubenswrapper[4923]: I1128 11:26:57.056854 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2909a8f5-2cbf-42e7-b43c-699344dca6bc-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "2909a8f5-2cbf-42e7-b43c-699344dca6bc" (UID: "2909a8f5-2cbf-42e7-b43c-699344dca6bc"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:26:57 crc kubenswrapper[4923]: I1128 11:26:57.094218 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xrgft\" (UniqueName: \"kubernetes.io/projected/2909a8f5-2cbf-42e7-b43c-699344dca6bc-kube-api-access-xrgft\") on node \"crc\" DevicePath \"\"" Nov 28 11:26:57 crc kubenswrapper[4923]: I1128 11:26:57.094256 4923 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2909a8f5-2cbf-42e7-b43c-699344dca6bc-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 11:26:57 crc kubenswrapper[4923]: I1128 11:26:57.094270 4923 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2909a8f5-2cbf-42e7-b43c-699344dca6bc-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 11:26:57 crc kubenswrapper[4923]: I1128 11:26:57.094280 4923 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2909a8f5-2cbf-42e7-b43c-699344dca6bc-config\") on node \"crc\" DevicePath \"\"" Nov 28 11:26:57 crc kubenswrapper[4923]: I1128 11:26:57.253448 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-8dbh7"] Nov 28 11:26:57 crc kubenswrapper[4923]: I1128 11:26:57.300798 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-13ae-account-create-update-5xj8w"] Nov 28 11:26:57 crc kubenswrapper[4923]: W1128 11:26:57.306477 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podebc90f13_4ffb_450c_a5ca_16053a5111d5.slice/crio-3f989a6ae4c3836cb2e556b20caa8c607d7ab37b16e0459a26a61550c850b487 WatchSource:0}: Error finding container 3f989a6ae4c3836cb2e556b20caa8c607d7ab37b16e0459a26a61550c850b487: Status 404 returned error can't find the container with id 3f989a6ae4c3836cb2e556b20caa8c607d7ab37b16e0459a26a61550c850b487 Nov 28 11:26:57 crc kubenswrapper[4923]: I1128 11:26:57.426250 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-8dbh7" event={"ID":"c2a257ac-b03d-4c96-b2e5-bd306b02b489","Type":"ContainerStarted","Data":"fea46f7f844f2d88fa6a0767377d40b520777029abf99fbe689f42f3efe54b89"} Nov 28 11:26:57 crc kubenswrapper[4923]: I1128 11:26:57.426316 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-8dbh7" event={"ID":"c2a257ac-b03d-4c96-b2e5-bd306b02b489","Type":"ContainerStarted","Data":"c80f6b466884b03c91285383353f6ab4ed16f5722faefe0e2028a2f45655db62"} Nov 28 11:26:57 crc kubenswrapper[4923]: I1128 11:26:57.428888 4923 generic.go:334] "Generic (PLEG): container finished" podID="2909a8f5-2cbf-42e7-b43c-699344dca6bc" containerID="69397d9ba20e7bc1f0ae5f496d4ead474d5a86c821b8c396d901729b00f8464e" exitCode=0 Nov 28 11:26:57 crc kubenswrapper[4923]: I1128 11:26:57.428945 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7fd796d7df-rlnpt" Nov 28 11:26:57 crc kubenswrapper[4923]: I1128 11:26:57.428947 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7fd796d7df-rlnpt" event={"ID":"2909a8f5-2cbf-42e7-b43c-699344dca6bc","Type":"ContainerDied","Data":"69397d9ba20e7bc1f0ae5f496d4ead474d5a86c821b8c396d901729b00f8464e"} Nov 28 11:26:57 crc kubenswrapper[4923]: I1128 11:26:57.429031 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7fd796d7df-rlnpt" event={"ID":"2909a8f5-2cbf-42e7-b43c-699344dca6bc","Type":"ContainerDied","Data":"29df96790adacb7ab61c54b73a9c7b9f3e2cac148adbc4bc1fe05e72c06f4bc8"} Nov 28 11:26:57 crc kubenswrapper[4923]: I1128 11:26:57.429055 4923 scope.go:117] "RemoveContainer" containerID="69397d9ba20e7bc1f0ae5f496d4ead474d5a86c821b8c396d901729b00f8464e" Nov 28 11:26:57 crc kubenswrapper[4923]: I1128 11:26:57.430839 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-13ae-account-create-update-5xj8w" event={"ID":"ebc90f13-4ffb-450c-a5ca-16053a5111d5","Type":"ContainerStarted","Data":"f2c9315382360f2405010c5b7588c9e9f094c9dbe4ebab29b449b0c669e4199d"} Nov 28 11:26:57 crc kubenswrapper[4923]: I1128 11:26:57.430864 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-13ae-account-create-update-5xj8w" event={"ID":"ebc90f13-4ffb-450c-a5ca-16053a5111d5","Type":"ContainerStarted","Data":"3f989a6ae4c3836cb2e556b20caa8c607d7ab37b16e0459a26a61550c850b487"} Nov 28 11:26:57 crc kubenswrapper[4923]: I1128 11:26:57.450734 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-create-8dbh7" podStartSLOduration=1.450714323 podStartE2EDuration="1.450714323s" podCreationTimestamp="2025-11-28 11:26:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:26:57.442315917 +0000 UTC m=+1096.571000147" watchObservedRunningTime="2025-11-28 11:26:57.450714323 +0000 UTC m=+1096.579398533" Nov 28 11:26:57 crc kubenswrapper[4923]: I1128 11:26:57.451294 4923 scope.go:117] "RemoveContainer" containerID="4fefde10c9b19d04f0ccc3161972eb73a8c0a4883ca4e96bbd1863b1d55e58b6" Nov 28 11:26:57 crc kubenswrapper[4923]: I1128 11:26:57.470899 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-13ae-account-create-update-5xj8w" podStartSLOduration=1.470883841 podStartE2EDuration="1.470883841s" podCreationTimestamp="2025-11-28 11:26:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:26:57.464745048 +0000 UTC m=+1096.593429268" watchObservedRunningTime="2025-11-28 11:26:57.470883841 +0000 UTC m=+1096.599568041" Nov 28 11:26:57 crc kubenswrapper[4923]: I1128 11:26:57.478378 4923 scope.go:117] "RemoveContainer" containerID="69397d9ba20e7bc1f0ae5f496d4ead474d5a86c821b8c396d901729b00f8464e" Nov 28 11:26:57 crc kubenswrapper[4923]: E1128 11:26:57.480870 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"69397d9ba20e7bc1f0ae5f496d4ead474d5a86c821b8c396d901729b00f8464e\": container with ID starting with 69397d9ba20e7bc1f0ae5f496d4ead474d5a86c821b8c396d901729b00f8464e not found: ID does not exist" containerID="69397d9ba20e7bc1f0ae5f496d4ead474d5a86c821b8c396d901729b00f8464e" Nov 28 11:26:57 crc kubenswrapper[4923]: I1128 
11:26:57.480909 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"69397d9ba20e7bc1f0ae5f496d4ead474d5a86c821b8c396d901729b00f8464e"} err="failed to get container status \"69397d9ba20e7bc1f0ae5f496d4ead474d5a86c821b8c396d901729b00f8464e\": rpc error: code = NotFound desc = could not find container \"69397d9ba20e7bc1f0ae5f496d4ead474d5a86c821b8c396d901729b00f8464e\": container with ID starting with 69397d9ba20e7bc1f0ae5f496d4ead474d5a86c821b8c396d901729b00f8464e not found: ID does not exist" Nov 28 11:26:57 crc kubenswrapper[4923]: I1128 11:26:57.480991 4923 scope.go:117] "RemoveContainer" containerID="4fefde10c9b19d04f0ccc3161972eb73a8c0a4883ca4e96bbd1863b1d55e58b6" Nov 28 11:26:57 crc kubenswrapper[4923]: E1128 11:26:57.481324 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4fefde10c9b19d04f0ccc3161972eb73a8c0a4883ca4e96bbd1863b1d55e58b6\": container with ID starting with 4fefde10c9b19d04f0ccc3161972eb73a8c0a4883ca4e96bbd1863b1d55e58b6 not found: ID does not exist" containerID="4fefde10c9b19d04f0ccc3161972eb73a8c0a4883ca4e96bbd1863b1d55e58b6" Nov 28 11:26:57 crc kubenswrapper[4923]: I1128 11:26:57.481346 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4fefde10c9b19d04f0ccc3161972eb73a8c0a4883ca4e96bbd1863b1d55e58b6"} err="failed to get container status \"4fefde10c9b19d04f0ccc3161972eb73a8c0a4883ca4e96bbd1863b1d55e58b6\": rpc error: code = NotFound desc = could not find container \"4fefde10c9b19d04f0ccc3161972eb73a8c0a4883ca4e96bbd1863b1d55e58b6\": container with ID starting with 4fefde10c9b19d04f0ccc3161972eb73a8c0a4883ca4e96bbd1863b1d55e58b6 not found: ID does not exist" Nov 28 11:26:57 crc kubenswrapper[4923]: I1128 11:26:57.486098 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7fd796d7df-rlnpt"] Nov 28 11:26:57 crc kubenswrapper[4923]: I1128 11:26:57.493294 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7fd796d7df-rlnpt"] Nov 28 11:26:57 crc kubenswrapper[4923]: E1128 11:26:57.810012 4923 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podebc90f13_4ffb_450c_a5ca_16053a5111d5.slice/crio-conmon-f2c9315382360f2405010c5b7588c9e9f094c9dbe4ebab29b449b0c669e4199d.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podebc90f13_4ffb_450c_a5ca_16053a5111d5.slice/crio-f2c9315382360f2405010c5b7588c9e9f094c9dbe4ebab29b449b0c669e4199d.scope\": RecentStats: unable to find data in memory cache]" Nov 28 11:26:58 crc kubenswrapper[4923]: I1128 11:26:58.443198 4923 generic.go:334] "Generic (PLEG): container finished" podID="c2a257ac-b03d-4c96-b2e5-bd306b02b489" containerID="fea46f7f844f2d88fa6a0767377d40b520777029abf99fbe689f42f3efe54b89" exitCode=0 Nov 28 11:26:58 crc kubenswrapper[4923]: I1128 11:26:58.443290 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-8dbh7" event={"ID":"c2a257ac-b03d-4c96-b2e5-bd306b02b489","Type":"ContainerDied","Data":"fea46f7f844f2d88fa6a0767377d40b520777029abf99fbe689f42f3efe54b89"} Nov 28 11:26:58 crc kubenswrapper[4923]: I1128 11:26:58.448152 4923 generic.go:334] "Generic (PLEG): container finished" podID="ebc90f13-4ffb-450c-a5ca-16053a5111d5" 
containerID="f2c9315382360f2405010c5b7588c9e9f094c9dbe4ebab29b449b0c669e4199d" exitCode=0 Nov 28 11:26:58 crc kubenswrapper[4923]: I1128 11:26:58.448189 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-13ae-account-create-update-5xj8w" event={"ID":"ebc90f13-4ffb-450c-a5ca-16053a5111d5","Type":"ContainerDied","Data":"f2c9315382360f2405010c5b7588c9e9f094c9dbe4ebab29b449b0c669e4199d"} Nov 28 11:26:59 crc kubenswrapper[4923]: I1128 11:26:59.190624 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2909a8f5-2cbf-42e7-b43c-699344dca6bc" path="/var/lib/kubelet/pods/2909a8f5-2cbf-42e7-b43c-699344dca6bc/volumes" Nov 28 11:26:59 crc kubenswrapper[4923]: I1128 11:26:59.923607 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-13ae-account-create-update-5xj8w" Nov 28 11:26:59 crc kubenswrapper[4923]: I1128 11:26:59.951800 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4cjm9\" (UniqueName: \"kubernetes.io/projected/ebc90f13-4ffb-450c-a5ca-16053a5111d5-kube-api-access-4cjm9\") pod \"ebc90f13-4ffb-450c-a5ca-16053a5111d5\" (UID: \"ebc90f13-4ffb-450c-a5ca-16053a5111d5\") " Nov 28 11:26:59 crc kubenswrapper[4923]: I1128 11:26:59.951899 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ebc90f13-4ffb-450c-a5ca-16053a5111d5-operator-scripts\") pod \"ebc90f13-4ffb-450c-a5ca-16053a5111d5\" (UID: \"ebc90f13-4ffb-450c-a5ca-16053a5111d5\") " Nov 28 11:26:59 crc kubenswrapper[4923]: I1128 11:26:59.952844 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ebc90f13-4ffb-450c-a5ca-16053a5111d5-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "ebc90f13-4ffb-450c-a5ca-16053a5111d5" (UID: "ebc90f13-4ffb-450c-a5ca-16053a5111d5"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:26:59 crc kubenswrapper[4923]: I1128 11:26:59.957287 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ebc90f13-4ffb-450c-a5ca-16053a5111d5-kube-api-access-4cjm9" (OuterVolumeSpecName: "kube-api-access-4cjm9") pod "ebc90f13-4ffb-450c-a5ca-16053a5111d5" (UID: "ebc90f13-4ffb-450c-a5ca-16053a5111d5"). InnerVolumeSpecName "kube-api-access-4cjm9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:27:00 crc kubenswrapper[4923]: I1128 11:27:00.021682 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-8dbh7" Nov 28 11:27:00 crc kubenswrapper[4923]: I1128 11:27:00.058674 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gd4n7\" (UniqueName: \"kubernetes.io/projected/c2a257ac-b03d-4c96-b2e5-bd306b02b489-kube-api-access-gd4n7\") pod \"c2a257ac-b03d-4c96-b2e5-bd306b02b489\" (UID: \"c2a257ac-b03d-4c96-b2e5-bd306b02b489\") " Nov 28 11:27:00 crc kubenswrapper[4923]: I1128 11:27:00.059028 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c2a257ac-b03d-4c96-b2e5-bd306b02b489-operator-scripts\") pod \"c2a257ac-b03d-4c96-b2e5-bd306b02b489\" (UID: \"c2a257ac-b03d-4c96-b2e5-bd306b02b489\") " Nov 28 11:27:00 crc kubenswrapper[4923]: I1128 11:27:00.059775 4923 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ebc90f13-4ffb-450c-a5ca-16053a5111d5-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 11:27:00 crc kubenswrapper[4923]: I1128 11:27:00.059794 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4cjm9\" (UniqueName: \"kubernetes.io/projected/ebc90f13-4ffb-450c-a5ca-16053a5111d5-kube-api-access-4cjm9\") on node \"crc\" DevicePath \"\"" Nov 28 11:27:00 crc kubenswrapper[4923]: I1128 11:27:00.060207 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c2a257ac-b03d-4c96-b2e5-bd306b02b489-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "c2a257ac-b03d-4c96-b2e5-bd306b02b489" (UID: "c2a257ac-b03d-4c96-b2e5-bd306b02b489"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:27:00 crc kubenswrapper[4923]: I1128 11:27:00.078695 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c2a257ac-b03d-4c96-b2e5-bd306b02b489-kube-api-access-gd4n7" (OuterVolumeSpecName: "kube-api-access-gd4n7") pod "c2a257ac-b03d-4c96-b2e5-bd306b02b489" (UID: "c2a257ac-b03d-4c96-b2e5-bd306b02b489"). InnerVolumeSpecName "kube-api-access-gd4n7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:27:00 crc kubenswrapper[4923]: I1128 11:27:00.160811 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gd4n7\" (UniqueName: \"kubernetes.io/projected/c2a257ac-b03d-4c96-b2e5-bd306b02b489-kube-api-access-gd4n7\") on node \"crc\" DevicePath \"\"" Nov 28 11:27:00 crc kubenswrapper[4923]: I1128 11:27:00.160846 4923 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c2a257ac-b03d-4c96-b2e5-bd306b02b489-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 11:27:00 crc kubenswrapper[4923]: I1128 11:27:00.470180 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-8dbh7" Nov 28 11:27:00 crc kubenswrapper[4923]: I1128 11:27:00.470190 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-8dbh7" event={"ID":"c2a257ac-b03d-4c96-b2e5-bd306b02b489","Type":"ContainerDied","Data":"c80f6b466884b03c91285383353f6ab4ed16f5722faefe0e2028a2f45655db62"} Nov 28 11:27:00 crc kubenswrapper[4923]: I1128 11:27:00.470443 4923 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c80f6b466884b03c91285383353f6ab4ed16f5722faefe0e2028a2f45655db62" Nov 28 11:27:00 crc kubenswrapper[4923]: I1128 11:27:00.474357 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-13ae-account-create-update-5xj8w" event={"ID":"ebc90f13-4ffb-450c-a5ca-16053a5111d5","Type":"ContainerDied","Data":"3f989a6ae4c3836cb2e556b20caa8c607d7ab37b16e0459a26a61550c850b487"} Nov 28 11:27:00 crc kubenswrapper[4923]: I1128 11:27:00.474416 4923 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3f989a6ae4c3836cb2e556b20caa8c607d7ab37b16e0459a26a61550c850b487" Nov 28 11:27:00 crc kubenswrapper[4923]: I1128 11:27:00.474440 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-13ae-account-create-update-5xj8w" Nov 28 11:27:00 crc kubenswrapper[4923]: I1128 11:27:00.577621 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-hjzh6"] Nov 28 11:27:00 crc kubenswrapper[4923]: E1128 11:27:00.579986 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ebc90f13-4ffb-450c-a5ca-16053a5111d5" containerName="mariadb-account-create-update" Nov 28 11:27:00 crc kubenswrapper[4923]: I1128 11:27:00.580009 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="ebc90f13-4ffb-450c-a5ca-16053a5111d5" containerName="mariadb-account-create-update" Nov 28 11:27:00 crc kubenswrapper[4923]: E1128 11:27:00.580035 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2909a8f5-2cbf-42e7-b43c-699344dca6bc" containerName="init" Nov 28 11:27:00 crc kubenswrapper[4923]: I1128 11:27:00.580044 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="2909a8f5-2cbf-42e7-b43c-699344dca6bc" containerName="init" Nov 28 11:27:00 crc kubenswrapper[4923]: E1128 11:27:00.580055 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c2a257ac-b03d-4c96-b2e5-bd306b02b489" containerName="mariadb-database-create" Nov 28 11:27:00 crc kubenswrapper[4923]: I1128 11:27:00.580064 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="c2a257ac-b03d-4c96-b2e5-bd306b02b489" containerName="mariadb-database-create" Nov 28 11:27:00 crc kubenswrapper[4923]: E1128 11:27:00.580079 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2909a8f5-2cbf-42e7-b43c-699344dca6bc" containerName="dnsmasq-dns" Nov 28 11:27:00 crc kubenswrapper[4923]: I1128 11:27:00.580087 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="2909a8f5-2cbf-42e7-b43c-699344dca6bc" containerName="dnsmasq-dns" Nov 28 11:27:00 crc kubenswrapper[4923]: I1128 11:27:00.580280 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="2909a8f5-2cbf-42e7-b43c-699344dca6bc" containerName="dnsmasq-dns" Nov 28 11:27:00 crc kubenswrapper[4923]: I1128 11:27:00.580297 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="ebc90f13-4ffb-450c-a5ca-16053a5111d5" containerName="mariadb-account-create-update" Nov 28 11:27:00 crc kubenswrapper[4923]: I1128 
11:27:00.580309 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="c2a257ac-b03d-4c96-b2e5-bd306b02b489" containerName="mariadb-database-create" Nov 28 11:27:00 crc kubenswrapper[4923]: I1128 11:27:00.580918 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-hjzh6" Nov 28 11:27:00 crc kubenswrapper[4923]: I1128 11:27:00.587170 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-hjzh6"] Nov 28 11:27:00 crc kubenswrapper[4923]: I1128 11:27:00.668801 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f6529789-5374-4f6a-89a8-338ebb535753-operator-scripts\") pod \"keystone-db-create-hjzh6\" (UID: \"f6529789-5374-4f6a-89a8-338ebb535753\") " pod="openstack/keystone-db-create-hjzh6" Nov 28 11:27:00 crc kubenswrapper[4923]: I1128 11:27:00.668961 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gpd5v\" (UniqueName: \"kubernetes.io/projected/f6529789-5374-4f6a-89a8-338ebb535753-kube-api-access-gpd5v\") pod \"keystone-db-create-hjzh6\" (UID: \"f6529789-5374-4f6a-89a8-338ebb535753\") " pod="openstack/keystone-db-create-hjzh6" Nov 28 11:27:00 crc kubenswrapper[4923]: I1128 11:27:00.677202 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-0809-account-create-update-vkdf5"] Nov 28 11:27:00 crc kubenswrapper[4923]: I1128 11:27:00.678081 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-0809-account-create-update-vkdf5" Nov 28 11:27:00 crc kubenswrapper[4923]: I1128 11:27:00.679900 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret" Nov 28 11:27:00 crc kubenswrapper[4923]: I1128 11:27:00.690251 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-0809-account-create-update-vkdf5"] Nov 28 11:27:00 crc kubenswrapper[4923]: I1128 11:27:00.770457 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f6529789-5374-4f6a-89a8-338ebb535753-operator-scripts\") pod \"keystone-db-create-hjzh6\" (UID: \"f6529789-5374-4f6a-89a8-338ebb535753\") " pod="openstack/keystone-db-create-hjzh6" Nov 28 11:27:00 crc kubenswrapper[4923]: I1128 11:27:00.770578 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gpd5v\" (UniqueName: \"kubernetes.io/projected/f6529789-5374-4f6a-89a8-338ebb535753-kube-api-access-gpd5v\") pod \"keystone-db-create-hjzh6\" (UID: \"f6529789-5374-4f6a-89a8-338ebb535753\") " pod="openstack/keystone-db-create-hjzh6" Nov 28 11:27:00 crc kubenswrapper[4923]: I1128 11:27:00.770610 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9c1fa907-042f-413b-abcc-4e6fbf23b382-operator-scripts\") pod \"keystone-0809-account-create-update-vkdf5\" (UID: \"9c1fa907-042f-413b-abcc-4e6fbf23b382\") " pod="openstack/keystone-0809-account-create-update-vkdf5" Nov 28 11:27:00 crc kubenswrapper[4923]: I1128 11:27:00.770651 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-44prn\" (UniqueName: \"kubernetes.io/projected/9c1fa907-042f-413b-abcc-4e6fbf23b382-kube-api-access-44prn\") pod 
\"keystone-0809-account-create-update-vkdf5\" (UID: \"9c1fa907-042f-413b-abcc-4e6fbf23b382\") " pod="openstack/keystone-0809-account-create-update-vkdf5" Nov 28 11:27:00 crc kubenswrapper[4923]: I1128 11:27:00.771336 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f6529789-5374-4f6a-89a8-338ebb535753-operator-scripts\") pod \"keystone-db-create-hjzh6\" (UID: \"f6529789-5374-4f6a-89a8-338ebb535753\") " pod="openstack/keystone-db-create-hjzh6" Nov 28 11:27:00 crc kubenswrapper[4923]: I1128 11:27:00.786846 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gpd5v\" (UniqueName: \"kubernetes.io/projected/f6529789-5374-4f6a-89a8-338ebb535753-kube-api-access-gpd5v\") pod \"keystone-db-create-hjzh6\" (UID: \"f6529789-5374-4f6a-89a8-338ebb535753\") " pod="openstack/keystone-db-create-hjzh6" Nov 28 11:27:00 crc kubenswrapper[4923]: I1128 11:27:00.872259 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9c1fa907-042f-413b-abcc-4e6fbf23b382-operator-scripts\") pod \"keystone-0809-account-create-update-vkdf5\" (UID: \"9c1fa907-042f-413b-abcc-4e6fbf23b382\") " pod="openstack/keystone-0809-account-create-update-vkdf5" Nov 28 11:27:00 crc kubenswrapper[4923]: I1128 11:27:00.872311 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-44prn\" (UniqueName: \"kubernetes.io/projected/9c1fa907-042f-413b-abcc-4e6fbf23b382-kube-api-access-44prn\") pod \"keystone-0809-account-create-update-vkdf5\" (UID: \"9c1fa907-042f-413b-abcc-4e6fbf23b382\") " pod="openstack/keystone-0809-account-create-update-vkdf5" Nov 28 11:27:00 crc kubenswrapper[4923]: I1128 11:27:00.873141 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9c1fa907-042f-413b-abcc-4e6fbf23b382-operator-scripts\") pod \"keystone-0809-account-create-update-vkdf5\" (UID: \"9c1fa907-042f-413b-abcc-4e6fbf23b382\") " pod="openstack/keystone-0809-account-create-update-vkdf5" Nov 28 11:27:00 crc kubenswrapper[4923]: I1128 11:27:00.893527 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-44prn\" (UniqueName: \"kubernetes.io/projected/9c1fa907-042f-413b-abcc-4e6fbf23b382-kube-api-access-44prn\") pod \"keystone-0809-account-create-update-vkdf5\" (UID: \"9c1fa907-042f-413b-abcc-4e6fbf23b382\") " pod="openstack/keystone-0809-account-create-update-vkdf5" Nov 28 11:27:00 crc kubenswrapper[4923]: I1128 11:27:00.914486 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-hjzh6" Nov 28 11:27:00 crc kubenswrapper[4923]: I1128 11:27:00.961378 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-x7nxd"] Nov 28 11:27:00 crc kubenswrapper[4923]: I1128 11:27:00.964427 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-x7nxd" Nov 28 11:27:00 crc kubenswrapper[4923]: I1128 11:27:00.975750 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-x7nxd"] Nov 28 11:27:00 crc kubenswrapper[4923]: I1128 11:27:00.992998 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-0809-account-create-update-vkdf5" Nov 28 11:27:01 crc kubenswrapper[4923]: I1128 11:27:01.074899 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-30e6-account-create-update-m9w5t"] Nov 28 11:27:01 crc kubenswrapper[4923]: I1128 11:27:01.076418 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bb2c6ef9-4d96-46a8-b4da-1e2fa56ba3eb-operator-scripts\") pod \"placement-db-create-x7nxd\" (UID: \"bb2c6ef9-4d96-46a8-b4da-1e2fa56ba3eb\") " pod="openstack/placement-db-create-x7nxd" Nov 28 11:27:01 crc kubenswrapper[4923]: I1128 11:27:01.076521 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nsfgb\" (UniqueName: \"kubernetes.io/projected/bb2c6ef9-4d96-46a8-b4da-1e2fa56ba3eb-kube-api-access-nsfgb\") pod \"placement-db-create-x7nxd\" (UID: \"bb2c6ef9-4d96-46a8-b4da-1e2fa56ba3eb\") " pod="openstack/placement-db-create-x7nxd" Nov 28 11:27:01 crc kubenswrapper[4923]: I1128 11:27:01.076850 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-30e6-account-create-update-m9w5t" Nov 28 11:27:01 crc kubenswrapper[4923]: I1128 11:27:01.078282 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret" Nov 28 11:27:01 crc kubenswrapper[4923]: I1128 11:27:01.101100 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-30e6-account-create-update-m9w5t"] Nov 28 11:27:01 crc kubenswrapper[4923]: I1128 11:27:01.178972 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3d15e393-6a58-4f86-8109-27c62533f866-operator-scripts\") pod \"placement-30e6-account-create-update-m9w5t\" (UID: \"3d15e393-6a58-4f86-8109-27c62533f866\") " pod="openstack/placement-30e6-account-create-update-m9w5t" Nov 28 11:27:01 crc kubenswrapper[4923]: I1128 11:27:01.179297 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bb2c6ef9-4d96-46a8-b4da-1e2fa56ba3eb-operator-scripts\") pod \"placement-db-create-x7nxd\" (UID: \"bb2c6ef9-4d96-46a8-b4da-1e2fa56ba3eb\") " pod="openstack/placement-db-create-x7nxd" Nov 28 11:27:01 crc kubenswrapper[4923]: I1128 11:27:01.179319 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nsfgb\" (UniqueName: \"kubernetes.io/projected/bb2c6ef9-4d96-46a8-b4da-1e2fa56ba3eb-kube-api-access-nsfgb\") pod \"placement-db-create-x7nxd\" (UID: \"bb2c6ef9-4d96-46a8-b4da-1e2fa56ba3eb\") " pod="openstack/placement-db-create-x7nxd" Nov 28 11:27:01 crc kubenswrapper[4923]: I1128 11:27:01.179370 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fq7x6\" (UniqueName: \"kubernetes.io/projected/3d15e393-6a58-4f86-8109-27c62533f866-kube-api-access-fq7x6\") pod \"placement-30e6-account-create-update-m9w5t\" (UID: \"3d15e393-6a58-4f86-8109-27c62533f866\") " pod="openstack/placement-30e6-account-create-update-m9w5t" Nov 28 11:27:01 crc kubenswrapper[4923]: I1128 11:27:01.179980 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bb2c6ef9-4d96-46a8-b4da-1e2fa56ba3eb-operator-scripts\") pod 
\"placement-db-create-x7nxd\" (UID: \"bb2c6ef9-4d96-46a8-b4da-1e2fa56ba3eb\") " pod="openstack/placement-db-create-x7nxd" Nov 28 11:27:01 crc kubenswrapper[4923]: I1128 11:27:01.202315 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nsfgb\" (UniqueName: \"kubernetes.io/projected/bb2c6ef9-4d96-46a8-b4da-1e2fa56ba3eb-kube-api-access-nsfgb\") pod \"placement-db-create-x7nxd\" (UID: \"bb2c6ef9-4d96-46a8-b4da-1e2fa56ba3eb\") " pod="openstack/placement-db-create-x7nxd" Nov 28 11:27:01 crc kubenswrapper[4923]: I1128 11:27:01.281162 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fq7x6\" (UniqueName: \"kubernetes.io/projected/3d15e393-6a58-4f86-8109-27c62533f866-kube-api-access-fq7x6\") pod \"placement-30e6-account-create-update-m9w5t\" (UID: \"3d15e393-6a58-4f86-8109-27c62533f866\") " pod="openstack/placement-30e6-account-create-update-m9w5t" Nov 28 11:27:01 crc kubenswrapper[4923]: I1128 11:27:01.281265 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3d15e393-6a58-4f86-8109-27c62533f866-operator-scripts\") pod \"placement-30e6-account-create-update-m9w5t\" (UID: \"3d15e393-6a58-4f86-8109-27c62533f866\") " pod="openstack/placement-30e6-account-create-update-m9w5t" Nov 28 11:27:01 crc kubenswrapper[4923]: I1128 11:27:01.281961 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3d15e393-6a58-4f86-8109-27c62533f866-operator-scripts\") pod \"placement-30e6-account-create-update-m9w5t\" (UID: \"3d15e393-6a58-4f86-8109-27c62533f866\") " pod="openstack/placement-30e6-account-create-update-m9w5t" Nov 28 11:27:01 crc kubenswrapper[4923]: I1128 11:27:01.296318 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fq7x6\" (UniqueName: \"kubernetes.io/projected/3d15e393-6a58-4f86-8109-27c62533f866-kube-api-access-fq7x6\") pod \"placement-30e6-account-create-update-m9w5t\" (UID: \"3d15e393-6a58-4f86-8109-27c62533f866\") " pod="openstack/placement-30e6-account-create-update-m9w5t" Nov 28 11:27:01 crc kubenswrapper[4923]: I1128 11:27:01.307319 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-x7nxd" Nov 28 11:27:01 crc kubenswrapper[4923]: I1128 11:27:01.415505 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-30e6-account-create-update-m9w5t" Nov 28 11:27:01 crc kubenswrapper[4923]: I1128 11:27:01.446774 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-hjzh6"] Nov 28 11:27:01 crc kubenswrapper[4923]: W1128 11:27:01.454882 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf6529789_5374_4f6a_89a8_338ebb535753.slice/crio-e0494e9ff969bf455d5d0bf6f9d8b6bbef922c74975a9f8490fdaf470fe3c1f6 WatchSource:0}: Error finding container e0494e9ff969bf455d5d0bf6f9d8b6bbef922c74975a9f8490fdaf470fe3c1f6: Status 404 returned error can't find the container with id e0494e9ff969bf455d5d0bf6f9d8b6bbef922c74975a9f8490fdaf470fe3c1f6 Nov 28 11:27:01 crc kubenswrapper[4923]: I1128 11:27:01.494099 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-hjzh6" event={"ID":"f6529789-5374-4f6a-89a8-338ebb535753","Type":"ContainerStarted","Data":"e0494e9ff969bf455d5d0bf6f9d8b6bbef922c74975a9f8490fdaf470fe3c1f6"} Nov 28 11:27:01 crc kubenswrapper[4923]: I1128 11:27:01.517090 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-0809-account-create-update-vkdf5"] Nov 28 11:27:01 crc kubenswrapper[4923]: I1128 11:27:01.531942 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-bdtml"] Nov 28 11:27:01 crc kubenswrapper[4923]: W1128 11:27:01.532524 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9c1fa907_042f_413b_abcc_4e6fbf23b382.slice/crio-efb0f52337ece197d469fc5ece6248be7f2a2bb6b7570d74902108f2351a0278 WatchSource:0}: Error finding container efb0f52337ece197d469fc5ece6248be7f2a2bb6b7570d74902108f2351a0278: Status 404 returned error can't find the container with id efb0f52337ece197d469fc5ece6248be7f2a2bb6b7570d74902108f2351a0278 Nov 28 11:27:01 crc kubenswrapper[4923]: I1128 11:27:01.539566 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-bdtml" Nov 28 11:27:01 crc kubenswrapper[4923]: I1128 11:27:01.551000 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-rt5qw" Nov 28 11:27:01 crc kubenswrapper[4923]: I1128 11:27:01.551354 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data" Nov 28 11:27:01 crc kubenswrapper[4923]: I1128 11:27:01.558805 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-bdtml"] Nov 28 11:27:01 crc kubenswrapper[4923]: I1128 11:27:01.594796 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d575e6ab-598e-4f0d-b33a-60515d1e8d21-config-data\") pod \"glance-db-sync-bdtml\" (UID: \"d575e6ab-598e-4f0d-b33a-60515d1e8d21\") " pod="openstack/glance-db-sync-bdtml" Nov 28 11:27:01 crc kubenswrapper[4923]: I1128 11:27:01.594887 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2clzt\" (UniqueName: \"kubernetes.io/projected/d575e6ab-598e-4f0d-b33a-60515d1e8d21-kube-api-access-2clzt\") pod \"glance-db-sync-bdtml\" (UID: \"d575e6ab-598e-4f0d-b33a-60515d1e8d21\") " pod="openstack/glance-db-sync-bdtml" Nov 28 11:27:01 crc kubenswrapper[4923]: I1128 11:27:01.594976 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d575e6ab-598e-4f0d-b33a-60515d1e8d21-combined-ca-bundle\") pod \"glance-db-sync-bdtml\" (UID: \"d575e6ab-598e-4f0d-b33a-60515d1e8d21\") " pod="openstack/glance-db-sync-bdtml" Nov 28 11:27:01 crc kubenswrapper[4923]: I1128 11:27:01.595046 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/d575e6ab-598e-4f0d-b33a-60515d1e8d21-db-sync-config-data\") pod \"glance-db-sync-bdtml\" (UID: \"d575e6ab-598e-4f0d-b33a-60515d1e8d21\") " pod="openstack/glance-db-sync-bdtml" Nov 28 11:27:01 crc kubenswrapper[4923]: I1128 11:27:01.696745 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/d575e6ab-598e-4f0d-b33a-60515d1e8d21-db-sync-config-data\") pod \"glance-db-sync-bdtml\" (UID: \"d575e6ab-598e-4f0d-b33a-60515d1e8d21\") " pod="openstack/glance-db-sync-bdtml" Nov 28 11:27:01 crc kubenswrapper[4923]: I1128 11:27:01.697118 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d575e6ab-598e-4f0d-b33a-60515d1e8d21-config-data\") pod \"glance-db-sync-bdtml\" (UID: \"d575e6ab-598e-4f0d-b33a-60515d1e8d21\") " pod="openstack/glance-db-sync-bdtml" Nov 28 11:27:01 crc kubenswrapper[4923]: I1128 11:27:01.697183 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2clzt\" (UniqueName: \"kubernetes.io/projected/d575e6ab-598e-4f0d-b33a-60515d1e8d21-kube-api-access-2clzt\") pod \"glance-db-sync-bdtml\" (UID: \"d575e6ab-598e-4f0d-b33a-60515d1e8d21\") " pod="openstack/glance-db-sync-bdtml" Nov 28 11:27:01 crc kubenswrapper[4923]: I1128 11:27:01.697222 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d575e6ab-598e-4f0d-b33a-60515d1e8d21-combined-ca-bundle\") pod 
\"glance-db-sync-bdtml\" (UID: \"d575e6ab-598e-4f0d-b33a-60515d1e8d21\") " pod="openstack/glance-db-sync-bdtml" Nov 28 11:27:01 crc kubenswrapper[4923]: I1128 11:27:01.701601 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/d575e6ab-598e-4f0d-b33a-60515d1e8d21-db-sync-config-data\") pod \"glance-db-sync-bdtml\" (UID: \"d575e6ab-598e-4f0d-b33a-60515d1e8d21\") " pod="openstack/glance-db-sync-bdtml" Nov 28 11:27:01 crc kubenswrapper[4923]: I1128 11:27:01.701694 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d575e6ab-598e-4f0d-b33a-60515d1e8d21-combined-ca-bundle\") pod \"glance-db-sync-bdtml\" (UID: \"d575e6ab-598e-4f0d-b33a-60515d1e8d21\") " pod="openstack/glance-db-sync-bdtml" Nov 28 11:27:01 crc kubenswrapper[4923]: I1128 11:27:01.702175 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d575e6ab-598e-4f0d-b33a-60515d1e8d21-config-data\") pod \"glance-db-sync-bdtml\" (UID: \"d575e6ab-598e-4f0d-b33a-60515d1e8d21\") " pod="openstack/glance-db-sync-bdtml" Nov 28 11:27:01 crc kubenswrapper[4923]: I1128 11:27:01.716200 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2clzt\" (UniqueName: \"kubernetes.io/projected/d575e6ab-598e-4f0d-b33a-60515d1e8d21-kube-api-access-2clzt\") pod \"glance-db-sync-bdtml\" (UID: \"d575e6ab-598e-4f0d-b33a-60515d1e8d21\") " pod="openstack/glance-db-sync-bdtml" Nov 28 11:27:01 crc kubenswrapper[4923]: W1128 11:27:01.769425 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbb2c6ef9_4d96_46a8_b4da_1e2fa56ba3eb.slice/crio-27fda4336af8a7766aae2461db43130e50ff87672182da5db7988a04265ea6a1 WatchSource:0}: Error finding container 27fda4336af8a7766aae2461db43130e50ff87672182da5db7988a04265ea6a1: Status 404 returned error can't find the container with id 27fda4336af8a7766aae2461db43130e50ff87672182da5db7988a04265ea6a1 Nov 28 11:27:01 crc kubenswrapper[4923]: I1128 11:27:01.772593 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-x7nxd"] Nov 28 11:27:01 crc kubenswrapper[4923]: I1128 11:27:01.881382 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-bdtml" Nov 28 11:27:01 crc kubenswrapper[4923]: I1128 11:27:01.943479 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-30e6-account-create-update-m9w5t"] Nov 28 11:27:02 crc kubenswrapper[4923]: I1128 11:27:02.442852 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-bdtml"] Nov 28 11:27:02 crc kubenswrapper[4923]: W1128 11:27:02.454233 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd575e6ab_598e_4f0d_b33a_60515d1e8d21.slice/crio-defc1df3990cb85c4091e4066c1d376b570e8f3de9788bd8b1f9e879aa3321c2 WatchSource:0}: Error finding container defc1df3990cb85c4091e4066c1d376b570e8f3de9788bd8b1f9e879aa3321c2: Status 404 returned error can't find the container with id defc1df3990cb85c4091e4066c1d376b570e8f3de9788bd8b1f9e879aa3321c2 Nov 28 11:27:02 crc kubenswrapper[4923]: I1128 11:27:02.504466 4923 generic.go:334] "Generic (PLEG): container finished" podID="9c1fa907-042f-413b-abcc-4e6fbf23b382" containerID="ac8c39960114ff71653053529fd81fb7e45cdced6ca55c468ed1a62d1925e3b8" exitCode=0 Nov 28 11:27:02 crc kubenswrapper[4923]: I1128 11:27:02.504540 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-0809-account-create-update-vkdf5" event={"ID":"9c1fa907-042f-413b-abcc-4e6fbf23b382","Type":"ContainerDied","Data":"ac8c39960114ff71653053529fd81fb7e45cdced6ca55c468ed1a62d1925e3b8"} Nov 28 11:27:02 crc kubenswrapper[4923]: I1128 11:27:02.504594 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-0809-account-create-update-vkdf5" event={"ID":"9c1fa907-042f-413b-abcc-4e6fbf23b382","Type":"ContainerStarted","Data":"efb0f52337ece197d469fc5ece6248be7f2a2bb6b7570d74902108f2351a0278"} Nov 28 11:27:02 crc kubenswrapper[4923]: I1128 11:27:02.506136 4923 generic.go:334] "Generic (PLEG): container finished" podID="bb2c6ef9-4d96-46a8-b4da-1e2fa56ba3eb" containerID="0295fe043258bff02eb8d18a771bf4651077844dff9705f5b1afd78596678f24" exitCode=0 Nov 28 11:27:02 crc kubenswrapper[4923]: I1128 11:27:02.506224 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-x7nxd" event={"ID":"bb2c6ef9-4d96-46a8-b4da-1e2fa56ba3eb","Type":"ContainerDied","Data":"0295fe043258bff02eb8d18a771bf4651077844dff9705f5b1afd78596678f24"} Nov 28 11:27:02 crc kubenswrapper[4923]: I1128 11:27:02.506265 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-x7nxd" event={"ID":"bb2c6ef9-4d96-46a8-b4da-1e2fa56ba3eb","Type":"ContainerStarted","Data":"27fda4336af8a7766aae2461db43130e50ff87672182da5db7988a04265ea6a1"} Nov 28 11:27:02 crc kubenswrapper[4923]: I1128 11:27:02.507759 4923 generic.go:334] "Generic (PLEG): container finished" podID="f6529789-5374-4f6a-89a8-338ebb535753" containerID="7d91c376b62b075124ec2ede9e3d9d245aaad8f876ab72f4f2caaa9b6d42f539" exitCode=0 Nov 28 11:27:02 crc kubenswrapper[4923]: I1128 11:27:02.507809 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-hjzh6" event={"ID":"f6529789-5374-4f6a-89a8-338ebb535753","Type":"ContainerDied","Data":"7d91c376b62b075124ec2ede9e3d9d245aaad8f876ab72f4f2caaa9b6d42f539"} Nov 28 11:27:02 crc kubenswrapper[4923]: I1128 11:27:02.508925 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-bdtml" 
event={"ID":"d575e6ab-598e-4f0d-b33a-60515d1e8d21","Type":"ContainerStarted","Data":"defc1df3990cb85c4091e4066c1d376b570e8f3de9788bd8b1f9e879aa3321c2"} Nov 28 11:27:02 crc kubenswrapper[4923]: I1128 11:27:02.510662 4923 generic.go:334] "Generic (PLEG): container finished" podID="3d15e393-6a58-4f86-8109-27c62533f866" containerID="787dacc52b2fba47783ed7379ed429d7ffefb82825deec4d4e33fe634fc30f6c" exitCode=0 Nov 28 11:27:02 crc kubenswrapper[4923]: I1128 11:27:02.510713 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-30e6-account-create-update-m9w5t" event={"ID":"3d15e393-6a58-4f86-8109-27c62533f866","Type":"ContainerDied","Data":"787dacc52b2fba47783ed7379ed429d7ffefb82825deec4d4e33fe634fc30f6c"} Nov 28 11:27:02 crc kubenswrapper[4923]: I1128 11:27:02.510732 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-30e6-account-create-update-m9w5t" event={"ID":"3d15e393-6a58-4f86-8109-27c62533f866","Type":"ContainerStarted","Data":"787121657c9343debecf7a53710a469493850a8f1144d1fbeda472d840e92495"} Nov 28 11:27:03 crc kubenswrapper[4923]: I1128 11:27:03.220310 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0" Nov 28 11:27:03 crc kubenswrapper[4923]: I1128 11:27:03.885437 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-hjzh6" Nov 28 11:27:03 crc kubenswrapper[4923]: I1128 11:27:03.950146 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gpd5v\" (UniqueName: \"kubernetes.io/projected/f6529789-5374-4f6a-89a8-338ebb535753-kube-api-access-gpd5v\") pod \"f6529789-5374-4f6a-89a8-338ebb535753\" (UID: \"f6529789-5374-4f6a-89a8-338ebb535753\") " Nov 28 11:27:03 crc kubenswrapper[4923]: I1128 11:27:03.950312 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f6529789-5374-4f6a-89a8-338ebb535753-operator-scripts\") pod \"f6529789-5374-4f6a-89a8-338ebb535753\" (UID: \"f6529789-5374-4f6a-89a8-338ebb535753\") " Nov 28 11:27:03 crc kubenswrapper[4923]: I1128 11:27:03.951053 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f6529789-5374-4f6a-89a8-338ebb535753-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "f6529789-5374-4f6a-89a8-338ebb535753" (UID: "f6529789-5374-4f6a-89a8-338ebb535753"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:27:03 crc kubenswrapper[4923]: I1128 11:27:03.957689 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f6529789-5374-4f6a-89a8-338ebb535753-kube-api-access-gpd5v" (OuterVolumeSpecName: "kube-api-access-gpd5v") pod "f6529789-5374-4f6a-89a8-338ebb535753" (UID: "f6529789-5374-4f6a-89a8-338ebb535753"). InnerVolumeSpecName "kube-api-access-gpd5v". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:27:04 crc kubenswrapper[4923]: I1128 11:27:04.051797 4923 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f6529789-5374-4f6a-89a8-338ebb535753-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 11:27:04 crc kubenswrapper[4923]: I1128 11:27:04.051826 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gpd5v\" (UniqueName: \"kubernetes.io/projected/f6529789-5374-4f6a-89a8-338ebb535753-kube-api-access-gpd5v\") on node \"crc\" DevicePath \"\"" Nov 28 11:27:04 crc kubenswrapper[4923]: I1128 11:27:04.067259 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-30e6-account-create-update-m9w5t" Nov 28 11:27:04 crc kubenswrapper[4923]: I1128 11:27:04.071520 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-x7nxd" Nov 28 11:27:04 crc kubenswrapper[4923]: I1128 11:27:04.098906 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-0809-account-create-update-vkdf5" Nov 28 11:27:04 crc kubenswrapper[4923]: I1128 11:27:04.155889 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nsfgb\" (UniqueName: \"kubernetes.io/projected/bb2c6ef9-4d96-46a8-b4da-1e2fa56ba3eb-kube-api-access-nsfgb\") pod \"bb2c6ef9-4d96-46a8-b4da-1e2fa56ba3eb\" (UID: \"bb2c6ef9-4d96-46a8-b4da-1e2fa56ba3eb\") " Nov 28 11:27:04 crc kubenswrapper[4923]: I1128 11:27:04.156001 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3d15e393-6a58-4f86-8109-27c62533f866-operator-scripts\") pod \"3d15e393-6a58-4f86-8109-27c62533f866\" (UID: \"3d15e393-6a58-4f86-8109-27c62533f866\") " Nov 28 11:27:04 crc kubenswrapper[4923]: I1128 11:27:04.156034 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-44prn\" (UniqueName: \"kubernetes.io/projected/9c1fa907-042f-413b-abcc-4e6fbf23b382-kube-api-access-44prn\") pod \"9c1fa907-042f-413b-abcc-4e6fbf23b382\" (UID: \"9c1fa907-042f-413b-abcc-4e6fbf23b382\") " Nov 28 11:27:04 crc kubenswrapper[4923]: I1128 11:27:04.156141 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bb2c6ef9-4d96-46a8-b4da-1e2fa56ba3eb-operator-scripts\") pod \"bb2c6ef9-4d96-46a8-b4da-1e2fa56ba3eb\" (UID: \"bb2c6ef9-4d96-46a8-b4da-1e2fa56ba3eb\") " Nov 28 11:27:04 crc kubenswrapper[4923]: I1128 11:27:04.156173 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fq7x6\" (UniqueName: \"kubernetes.io/projected/3d15e393-6a58-4f86-8109-27c62533f866-kube-api-access-fq7x6\") pod \"3d15e393-6a58-4f86-8109-27c62533f866\" (UID: \"3d15e393-6a58-4f86-8109-27c62533f866\") " Nov 28 11:27:04 crc kubenswrapper[4923]: I1128 11:27:04.156212 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9c1fa907-042f-413b-abcc-4e6fbf23b382-operator-scripts\") pod \"9c1fa907-042f-413b-abcc-4e6fbf23b382\" (UID: \"9c1fa907-042f-413b-abcc-4e6fbf23b382\") " Nov 28 11:27:04 crc kubenswrapper[4923]: I1128 11:27:04.156460 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/configmap/3d15e393-6a58-4f86-8109-27c62533f866-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "3d15e393-6a58-4f86-8109-27c62533f866" (UID: "3d15e393-6a58-4f86-8109-27c62533f866"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:27:04 crc kubenswrapper[4923]: I1128 11:27:04.156852 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bb2c6ef9-4d96-46a8-b4da-1e2fa56ba3eb-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "bb2c6ef9-4d96-46a8-b4da-1e2fa56ba3eb" (UID: "bb2c6ef9-4d96-46a8-b4da-1e2fa56ba3eb"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:27:04 crc kubenswrapper[4923]: I1128 11:27:04.157534 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9c1fa907-042f-413b-abcc-4e6fbf23b382-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "9c1fa907-042f-413b-abcc-4e6fbf23b382" (UID: "9c1fa907-042f-413b-abcc-4e6fbf23b382"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:27:04 crc kubenswrapper[4923]: I1128 11:27:04.161226 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9c1fa907-042f-413b-abcc-4e6fbf23b382-kube-api-access-44prn" (OuterVolumeSpecName: "kube-api-access-44prn") pod "9c1fa907-042f-413b-abcc-4e6fbf23b382" (UID: "9c1fa907-042f-413b-abcc-4e6fbf23b382"). InnerVolumeSpecName "kube-api-access-44prn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:27:04 crc kubenswrapper[4923]: I1128 11:27:04.161312 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bb2c6ef9-4d96-46a8-b4da-1e2fa56ba3eb-kube-api-access-nsfgb" (OuterVolumeSpecName: "kube-api-access-nsfgb") pod "bb2c6ef9-4d96-46a8-b4da-1e2fa56ba3eb" (UID: "bb2c6ef9-4d96-46a8-b4da-1e2fa56ba3eb"). InnerVolumeSpecName "kube-api-access-nsfgb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:27:04 crc kubenswrapper[4923]: I1128 11:27:04.167189 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3d15e393-6a58-4f86-8109-27c62533f866-kube-api-access-fq7x6" (OuterVolumeSpecName: "kube-api-access-fq7x6") pod "3d15e393-6a58-4f86-8109-27c62533f866" (UID: "3d15e393-6a58-4f86-8109-27c62533f866"). InnerVolumeSpecName "kube-api-access-fq7x6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:27:04 crc kubenswrapper[4923]: I1128 11:27:04.257651 4923 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bb2c6ef9-4d96-46a8-b4da-1e2fa56ba3eb-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 11:27:04 crc kubenswrapper[4923]: I1128 11:27:04.257680 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fq7x6\" (UniqueName: \"kubernetes.io/projected/3d15e393-6a58-4f86-8109-27c62533f866-kube-api-access-fq7x6\") on node \"crc\" DevicePath \"\"" Nov 28 11:27:04 crc kubenswrapper[4923]: I1128 11:27:04.257689 4923 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9c1fa907-042f-413b-abcc-4e6fbf23b382-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 11:27:04 crc kubenswrapper[4923]: I1128 11:27:04.257697 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nsfgb\" (UniqueName: \"kubernetes.io/projected/bb2c6ef9-4d96-46a8-b4da-1e2fa56ba3eb-kube-api-access-nsfgb\") on node \"crc\" DevicePath \"\"" Nov 28 11:27:04 crc kubenswrapper[4923]: I1128 11:27:04.257705 4923 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3d15e393-6a58-4f86-8109-27c62533f866-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 11:27:04 crc kubenswrapper[4923]: I1128 11:27:04.257715 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-44prn\" (UniqueName: \"kubernetes.io/projected/9c1fa907-042f-413b-abcc-4e6fbf23b382-kube-api-access-44prn\") on node \"crc\" DevicePath \"\"" Nov 28 11:27:04 crc kubenswrapper[4923]: I1128 11:27:04.531882 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-0809-account-create-update-vkdf5" Nov 28 11:27:04 crc kubenswrapper[4923]: I1128 11:27:04.531891 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-0809-account-create-update-vkdf5" event={"ID":"9c1fa907-042f-413b-abcc-4e6fbf23b382","Type":"ContainerDied","Data":"efb0f52337ece197d469fc5ece6248be7f2a2bb6b7570d74902108f2351a0278"} Nov 28 11:27:04 crc kubenswrapper[4923]: I1128 11:27:04.532652 4923 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="efb0f52337ece197d469fc5ece6248be7f2a2bb6b7570d74902108f2351a0278" Nov 28 11:27:04 crc kubenswrapper[4923]: I1128 11:27:04.538102 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-x7nxd" event={"ID":"bb2c6ef9-4d96-46a8-b4da-1e2fa56ba3eb","Type":"ContainerDied","Data":"27fda4336af8a7766aae2461db43130e50ff87672182da5db7988a04265ea6a1"} Nov 28 11:27:04 crc kubenswrapper[4923]: I1128 11:27:04.538139 4923 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="27fda4336af8a7766aae2461db43130e50ff87672182da5db7988a04265ea6a1" Nov 28 11:27:04 crc kubenswrapper[4923]: I1128 11:27:04.538248 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-x7nxd" Nov 28 11:27:04 crc kubenswrapper[4923]: I1128 11:27:04.542163 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-hjzh6" Nov 28 11:27:04 crc kubenswrapper[4923]: I1128 11:27:04.542284 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-hjzh6" event={"ID":"f6529789-5374-4f6a-89a8-338ebb535753","Type":"ContainerDied","Data":"e0494e9ff969bf455d5d0bf6f9d8b6bbef922c74975a9f8490fdaf470fe3c1f6"} Nov 28 11:27:04 crc kubenswrapper[4923]: I1128 11:27:04.542307 4923 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e0494e9ff969bf455d5d0bf6f9d8b6bbef922c74975a9f8490fdaf470fe3c1f6" Nov 28 11:27:04 crc kubenswrapper[4923]: I1128 11:27:04.543473 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-30e6-account-create-update-m9w5t" event={"ID":"3d15e393-6a58-4f86-8109-27c62533f866","Type":"ContainerDied","Data":"787121657c9343debecf7a53710a469493850a8f1144d1fbeda472d840e92495"} Nov 28 11:27:04 crc kubenswrapper[4923]: I1128 11:27:04.543508 4923 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="787121657c9343debecf7a53710a469493850a8f1144d1fbeda472d840e92495" Nov 28 11:27:04 crc kubenswrapper[4923]: I1128 11:27:04.543561 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-30e6-account-create-update-m9w5t" Nov 28 11:27:11 crc kubenswrapper[4923]: I1128 11:27:11.414826 4923 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-67x9j" podUID="817453fc-6da1-4525-85bf-0d8b22848ff1" containerName="ovn-controller" probeResult="failure" output=< Nov 28 11:27:11 crc kubenswrapper[4923]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Nov 28 11:27:11 crc kubenswrapper[4923]: > Nov 28 11:27:11 crc kubenswrapper[4923]: I1128 11:27:11.434601 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-6d9vf" Nov 28 11:27:11 crc kubenswrapper[4923]: I1128 11:27:11.438125 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-6d9vf" Nov 28 11:27:11 crc kubenswrapper[4923]: I1128 11:27:11.684525 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-67x9j-config-nmht2"] Nov 28 11:27:11 crc kubenswrapper[4923]: E1128 11:27:11.684856 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bb2c6ef9-4d96-46a8-b4da-1e2fa56ba3eb" containerName="mariadb-database-create" Nov 28 11:27:11 crc kubenswrapper[4923]: I1128 11:27:11.684875 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="bb2c6ef9-4d96-46a8-b4da-1e2fa56ba3eb" containerName="mariadb-database-create" Nov 28 11:27:11 crc kubenswrapper[4923]: E1128 11:27:11.684891 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f6529789-5374-4f6a-89a8-338ebb535753" containerName="mariadb-database-create" Nov 28 11:27:11 crc kubenswrapper[4923]: I1128 11:27:11.684899 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="f6529789-5374-4f6a-89a8-338ebb535753" containerName="mariadb-database-create" Nov 28 11:27:11 crc kubenswrapper[4923]: E1128 11:27:11.684947 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9c1fa907-042f-413b-abcc-4e6fbf23b382" containerName="mariadb-account-create-update" Nov 28 11:27:11 crc kubenswrapper[4923]: I1128 11:27:11.684956 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="9c1fa907-042f-413b-abcc-4e6fbf23b382" containerName="mariadb-account-create-update" 
Nov 28 11:27:11 crc kubenswrapper[4923]: E1128 11:27:11.684969 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d15e393-6a58-4f86-8109-27c62533f866" containerName="mariadb-account-create-update"
Nov 28 11:27:11 crc kubenswrapper[4923]: I1128 11:27:11.684977 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d15e393-6a58-4f86-8109-27c62533f866" containerName="mariadb-account-create-update"
Nov 28 11:27:11 crc kubenswrapper[4923]: I1128 11:27:11.685160 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="9c1fa907-042f-413b-abcc-4e6fbf23b382" containerName="mariadb-account-create-update"
Nov 28 11:27:11 crc kubenswrapper[4923]: I1128 11:27:11.685177 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="f6529789-5374-4f6a-89a8-338ebb535753" containerName="mariadb-database-create"
Nov 28 11:27:11 crc kubenswrapper[4923]: I1128 11:27:11.685198 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="bb2c6ef9-4d96-46a8-b4da-1e2fa56ba3eb" containerName="mariadb-database-create"
Nov 28 11:27:11 crc kubenswrapper[4923]: I1128 11:27:11.685209 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="3d15e393-6a58-4f86-8109-27c62533f866" containerName="mariadb-account-create-update"
Nov 28 11:27:11 crc kubenswrapper[4923]: I1128 11:27:11.685769 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-67x9j-config-nmht2"
Nov 28 11:27:11 crc kubenswrapper[4923]: I1128 11:27:11.689425 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts"
Nov 28 11:27:11 crc kubenswrapper[4923]: I1128 11:27:11.694639 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-67x9j-config-nmht2"]
Nov 28 11:27:11 crc kubenswrapper[4923]: I1128 11:27:11.777729 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/475f664a-4416-4dad-bc1f-303b3a9e40e2-var-run-ovn\") pod \"ovn-controller-67x9j-config-nmht2\" (UID: \"475f664a-4416-4dad-bc1f-303b3a9e40e2\") " pod="openstack/ovn-controller-67x9j-config-nmht2"
Nov 28 11:27:11 crc kubenswrapper[4923]: I1128 11:27:11.777772 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/475f664a-4416-4dad-bc1f-303b3a9e40e2-var-run\") pod \"ovn-controller-67x9j-config-nmht2\" (UID: \"475f664a-4416-4dad-bc1f-303b3a9e40e2\") " pod="openstack/ovn-controller-67x9j-config-nmht2"
Nov 28 11:27:11 crc kubenswrapper[4923]: I1128 11:27:11.777801 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/475f664a-4416-4dad-bc1f-303b3a9e40e2-additional-scripts\") pod \"ovn-controller-67x9j-config-nmht2\" (UID: \"475f664a-4416-4dad-bc1f-303b3a9e40e2\") " pod="openstack/ovn-controller-67x9j-config-nmht2"
Nov 28 11:27:11 crc kubenswrapper[4923]: I1128 11:27:11.777826 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/475f664a-4416-4dad-bc1f-303b3a9e40e2-scripts\") pod \"ovn-controller-67x9j-config-nmht2\" (UID: \"475f664a-4416-4dad-bc1f-303b3a9e40e2\") " pod="openstack/ovn-controller-67x9j-config-nmht2"
Nov 28 11:27:11 crc kubenswrapper[4923]: I1128 11:27:11.778001 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-89bs8\" (UniqueName: \"kubernetes.io/projected/475f664a-4416-4dad-bc1f-303b3a9e40e2-kube-api-access-89bs8\") pod \"ovn-controller-67x9j-config-nmht2\" (UID: \"475f664a-4416-4dad-bc1f-303b3a9e40e2\") " pod="openstack/ovn-controller-67x9j-config-nmht2"
Nov 28 11:27:11 crc kubenswrapper[4923]: I1128 11:27:11.778098 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/475f664a-4416-4dad-bc1f-303b3a9e40e2-var-log-ovn\") pod \"ovn-controller-67x9j-config-nmht2\" (UID: \"475f664a-4416-4dad-bc1f-303b3a9e40e2\") " pod="openstack/ovn-controller-67x9j-config-nmht2"
Nov 28 11:27:11 crc kubenswrapper[4923]: I1128 11:27:11.879060 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/475f664a-4416-4dad-bc1f-303b3a9e40e2-additional-scripts\") pod \"ovn-controller-67x9j-config-nmht2\" (UID: \"475f664a-4416-4dad-bc1f-303b3a9e40e2\") " pod="openstack/ovn-controller-67x9j-config-nmht2"
Nov 28 11:27:11 crc kubenswrapper[4923]: I1128 11:27:11.879108 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/475f664a-4416-4dad-bc1f-303b3a9e40e2-scripts\") pod \"ovn-controller-67x9j-config-nmht2\" (UID: \"475f664a-4416-4dad-bc1f-303b3a9e40e2\") " pod="openstack/ovn-controller-67x9j-config-nmht2"
Nov 28 11:27:11 crc kubenswrapper[4923]: I1128 11:27:11.879141 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-89bs8\" (UniqueName: \"kubernetes.io/projected/475f664a-4416-4dad-bc1f-303b3a9e40e2-kube-api-access-89bs8\") pod \"ovn-controller-67x9j-config-nmht2\" (UID: \"475f664a-4416-4dad-bc1f-303b3a9e40e2\") " pod="openstack/ovn-controller-67x9j-config-nmht2"
Nov 28 11:27:11 crc kubenswrapper[4923]: I1128 11:27:11.879497 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/475f664a-4416-4dad-bc1f-303b3a9e40e2-var-log-ovn\") pod \"ovn-controller-67x9j-config-nmht2\" (UID: \"475f664a-4416-4dad-bc1f-303b3a9e40e2\") " pod="openstack/ovn-controller-67x9j-config-nmht2"
Nov 28 11:27:11 crc kubenswrapper[4923]: I1128 11:27:11.879874 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/475f664a-4416-4dad-bc1f-303b3a9e40e2-additional-scripts\") pod \"ovn-controller-67x9j-config-nmht2\" (UID: \"475f664a-4416-4dad-bc1f-303b3a9e40e2\") " pod="openstack/ovn-controller-67x9j-config-nmht2"
Nov 28 11:27:11 crc kubenswrapper[4923]: I1128 11:27:11.881081 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/475f664a-4416-4dad-bc1f-303b3a9e40e2-scripts\") pod \"ovn-controller-67x9j-config-nmht2\" (UID: \"475f664a-4416-4dad-bc1f-303b3a9e40e2\") " pod="openstack/ovn-controller-67x9j-config-nmht2"
Nov 28 11:27:11 crc kubenswrapper[4923]: I1128 11:27:11.881762 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/475f664a-4416-4dad-bc1f-303b3a9e40e2-var-run-ovn\") pod \"ovn-controller-67x9j-config-nmht2\" (UID: \"475f664a-4416-4dad-bc1f-303b3a9e40e2\") " pod="openstack/ovn-controller-67x9j-config-nmht2"
Nov 28 11:27:11 crc kubenswrapper[4923]: I1128 11:27:11.881780 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/475f664a-4416-4dad-bc1f-303b3a9e40e2-var-log-ovn\") pod \"ovn-controller-67x9j-config-nmht2\" (UID: \"475f664a-4416-4dad-bc1f-303b3a9e40e2\") " pod="openstack/ovn-controller-67x9j-config-nmht2"
Nov 28 11:27:11 crc kubenswrapper[4923]: I1128 11:27:11.881829 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/475f664a-4416-4dad-bc1f-303b3a9e40e2-var-run\") pod \"ovn-controller-67x9j-config-nmht2\" (UID: \"475f664a-4416-4dad-bc1f-303b3a9e40e2\") " pod="openstack/ovn-controller-67x9j-config-nmht2"
Nov 28 11:27:11 crc kubenswrapper[4923]: I1128 11:27:11.881967 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/475f664a-4416-4dad-bc1f-303b3a9e40e2-var-run\") pod \"ovn-controller-67x9j-config-nmht2\" (UID: \"475f664a-4416-4dad-bc1f-303b3a9e40e2\") " pod="openstack/ovn-controller-67x9j-config-nmht2"
Nov 28 11:27:11 crc kubenswrapper[4923]: I1128 11:27:11.882008 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/475f664a-4416-4dad-bc1f-303b3a9e40e2-var-run-ovn\") pod \"ovn-controller-67x9j-config-nmht2\" (UID: \"475f664a-4416-4dad-bc1f-303b3a9e40e2\") " pod="openstack/ovn-controller-67x9j-config-nmht2"
Nov 28 11:27:11 crc kubenswrapper[4923]: I1128 11:27:11.900745 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-89bs8\" (UniqueName: \"kubernetes.io/projected/475f664a-4416-4dad-bc1f-303b3a9e40e2-kube-api-access-89bs8\") pod \"ovn-controller-67x9j-config-nmht2\" (UID: \"475f664a-4416-4dad-bc1f-303b3a9e40e2\") " pod="openstack/ovn-controller-67x9j-config-nmht2"
Nov 28 11:27:12 crc kubenswrapper[4923]: I1128 11:27:12.008309 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-67x9j-config-nmht2"
Nov 28 11:27:12 crc kubenswrapper[4923]: I1128 11:27:12.604521 4923 generic.go:334] "Generic (PLEG): container finished" podID="456d70c2-443b-455b-83fe-fc87e36534ac" containerID="6b9977387f4a04660289708811d6e9fd63ab44d05d56ad4d5f94de24f39428d6" exitCode=0
Nov 28 11:27:12 crc kubenswrapper[4923]: I1128 11:27:12.604610 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"456d70c2-443b-455b-83fe-fc87e36534ac","Type":"ContainerDied","Data":"6b9977387f4a04660289708811d6e9fd63ab44d05d56ad4d5f94de24f39428d6"}
Nov 28 11:27:12 crc kubenswrapper[4923]: I1128 11:27:12.610851 4923 generic.go:334] "Generic (PLEG): container finished" podID="26a4b167-a30a-4655-80aa-2177fe14784c" containerID="877c6e8210bfbbb050a57173fa72769c5cca178fe72691fd5da642acdfd3f260" exitCode=0
Nov 28 11:27:12 crc kubenswrapper[4923]: I1128 11:27:12.610891 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"26a4b167-a30a-4655-80aa-2177fe14784c","Type":"ContainerDied","Data":"877c6e8210bfbbb050a57173fa72769c5cca178fe72691fd5da642acdfd3f260"}
Nov 28 11:27:14 crc kubenswrapper[4923]: I1128 11:27:14.026510 4923 patch_prober.go:28] interesting pod/machine-config-daemon-bwdth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 11:27:14 crc kubenswrapper[4923]: I1128 11:27:14.026844 4923 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 11:27:16 crc kubenswrapper[4923]: I1128 11:27:16.435164 4923 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-67x9j" podUID="817453fc-6da1-4525-85bf-0d8b22848ff1" containerName="ovn-controller" probeResult="failure" output=<
Nov 28 11:27:16 crc kubenswrapper[4923]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status
Nov 28 11:27:16 crc kubenswrapper[4923]: >
Nov 28 11:27:18 crc kubenswrapper[4923]: I1128 11:27:18.175532 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-67x9j-config-nmht2"]
Nov 28 11:27:18 crc kubenswrapper[4923]: I1128 11:27:18.666970 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-67x9j-config-nmht2" event={"ID":"475f664a-4416-4dad-bc1f-303b3a9e40e2","Type":"ContainerStarted","Data":"e2322e4c37f86cdad09834dca1076b76a685f0ee16d49528d28561ad073215ec"}
Nov 28 11:27:18 crc kubenswrapper[4923]: I1128 11:27:18.667252 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-67x9j-config-nmht2" event={"ID":"475f664a-4416-4dad-bc1f-303b3a9e40e2","Type":"ContainerStarted","Data":"39d879f904c66a911384dd6e6b52d2c4a8a0cfac72c1369dccec6e14dd8e0c1a"}
Nov 28 11:27:18 crc kubenswrapper[4923]: I1128 11:27:18.669000 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"456d70c2-443b-455b-83fe-fc87e36534ac","Type":"ContainerStarted","Data":"0adef79547a29cf58c840ff6fe32e02579298f45c778018b82629cb1f6d2e4e8"}
Nov 28 11:27:18 crc kubenswrapper[4923]: I1128 11:27:18.669667 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0"
Nov 28 11:27:18 crc kubenswrapper[4923]: I1128 11:27:18.674050 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"26a4b167-a30a-4655-80aa-2177fe14784c","Type":"ContainerStarted","Data":"15ed96e1e9c6a52ee3fa69e21deee536fed7f72518ec9117db061e51c643648e"}
Nov 28 11:27:18 crc kubenswrapper[4923]: I1128 11:27:18.674332 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0"
Nov 28 11:27:18 crc kubenswrapper[4923]: I1128 11:27:18.697618 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-67x9j-config-nmht2" podStartSLOduration=7.697600291 podStartE2EDuration="7.697600291s" podCreationTimestamp="2025-11-28 11:27:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:27:18.696888451 +0000 UTC m=+1117.825572661" watchObservedRunningTime="2025-11-28 11:27:18.697600291 +0000 UTC m=+1117.826284501"
Nov 28 11:27:18 crc kubenswrapper[4923]: I1128 11:27:18.832115 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=41.546963588 podStartE2EDuration="1m22.83210079s" podCreationTimestamp="2025-11-28 11:25:56 +0000 UTC" firstStartedPulling="2025-11-28 11:25:58.216369492 +0000 UTC m=+1037.345053702" lastFinishedPulling="2025-11-28 11:26:39.501506614 +0000 UTC m=+1078.630190904" observedRunningTime="2025-11-28 11:27:18.779679116 +0000 UTC m=+1117.908363326" watchObservedRunningTime="2025-11-28 11:27:18.83210079 +0000 UTC m=+1117.960784990"
Nov 28 11:27:18 crc kubenswrapper[4923]: I1128 11:27:18.842709 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=42.141686828 podStartE2EDuration="1m22.84269092s" podCreationTimestamp="2025-11-28 11:25:56 +0000 UTC" firstStartedPulling="2025-11-28 11:25:58.789052059 +0000 UTC m=+1037.917736269" lastFinishedPulling="2025-11-28 11:26:39.490056111 +0000 UTC m=+1078.618740361" observedRunningTime="2025-11-28 11:27:18.83351495 +0000 UTC m=+1117.962199160" watchObservedRunningTime="2025-11-28 11:27:18.84269092 +0000 UTC m=+1117.971375130"
Nov 28 11:27:19 crc kubenswrapper[4923]: I1128 11:27:19.683024 4923 generic.go:334] "Generic (PLEG): container finished" podID="475f664a-4416-4dad-bc1f-303b3a9e40e2" containerID="e2322e4c37f86cdad09834dca1076b76a685f0ee16d49528d28561ad073215ec" exitCode=0
Nov 28 11:27:19 crc kubenswrapper[4923]: I1128 11:27:19.683127 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-67x9j-config-nmht2" event={"ID":"475f664a-4416-4dad-bc1f-303b3a9e40e2","Type":"ContainerDied","Data":"e2322e4c37f86cdad09834dca1076b76a685f0ee16d49528d28561ad073215ec"}
Nov 28 11:27:19 crc kubenswrapper[4923]: I1128 11:27:19.684807 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-bdtml" event={"ID":"d575e6ab-598e-4f0d-b33a-60515d1e8d21","Type":"ContainerStarted","Data":"ec27bbb8d3db358884501b5303609ad0fbe631169d5b05fb5f24743f537863cf"}
Nov 28 11:27:21 crc kubenswrapper[4923]: I1128 11:27:21.029697 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-67x9j-config-nmht2"
Nov 28 11:27:21 crc kubenswrapper[4923]: I1128 11:27:21.048144 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-bdtml" podStartSLOduration=4.525036528 podStartE2EDuration="20.048126516s" podCreationTimestamp="2025-11-28 11:27:01 +0000 UTC" firstStartedPulling="2025-11-28 11:27:02.456643381 +0000 UTC m=+1101.585327591" lastFinishedPulling="2025-11-28 11:27:17.979733369 +0000 UTC m=+1117.108417579" observedRunningTime="2025-11-28 11:27:19.754572458 +0000 UTC m=+1118.883256668" watchObservedRunningTime="2025-11-28 11:27:21.048126516 +0000 UTC m=+1120.176810726"
Nov 28 11:27:21 crc kubenswrapper[4923]: I1128 11:27:21.179748 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/475f664a-4416-4dad-bc1f-303b3a9e40e2-scripts\") pod \"475f664a-4416-4dad-bc1f-303b3a9e40e2\" (UID: \"475f664a-4416-4dad-bc1f-303b3a9e40e2\") "
Nov 28 11:27:21 crc kubenswrapper[4923]: I1128 11:27:21.179782 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/475f664a-4416-4dad-bc1f-303b3a9e40e2-var-run-ovn\") pod \"475f664a-4416-4dad-bc1f-303b3a9e40e2\" (UID: \"475f664a-4416-4dad-bc1f-303b3a9e40e2\") "
Nov 28 11:27:21 crc kubenswrapper[4923]: I1128 11:27:21.179927 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/475f664a-4416-4dad-bc1f-303b3a9e40e2-additional-scripts\") pod \"475f664a-4416-4dad-bc1f-303b3a9e40e2\" (UID: \"475f664a-4416-4dad-bc1f-303b3a9e40e2\") "
Nov 28 11:27:21 crc kubenswrapper[4923]: I1128 11:27:21.179974 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/475f664a-4416-4dad-bc1f-303b3a9e40e2-var-run\") pod \"475f664a-4416-4dad-bc1f-303b3a9e40e2\" (UID: \"475f664a-4416-4dad-bc1f-303b3a9e40e2\") "
Nov 28 11:27:21 crc kubenswrapper[4923]: I1128 11:27:21.179992 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/475f664a-4416-4dad-bc1f-303b3a9e40e2-var-log-ovn\") pod \"475f664a-4416-4dad-bc1f-303b3a9e40e2\" (UID: \"475f664a-4416-4dad-bc1f-303b3a9e40e2\") "
Nov 28 11:27:21 crc kubenswrapper[4923]: I1128 11:27:21.180036 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-89bs8\" (UniqueName: \"kubernetes.io/projected/475f664a-4416-4dad-bc1f-303b3a9e40e2-kube-api-access-89bs8\") pod \"475f664a-4416-4dad-bc1f-303b3a9e40e2\" (UID: \"475f664a-4416-4dad-bc1f-303b3a9e40e2\") "
Nov 28 11:27:21 crc kubenswrapper[4923]: I1128 11:27:21.180383 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/475f664a-4416-4dad-bc1f-303b3a9e40e2-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "475f664a-4416-4dad-bc1f-303b3a9e40e2" (UID: "475f664a-4416-4dad-bc1f-303b3a9e40e2"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 11:27:21 crc kubenswrapper[4923]: I1128 11:27:21.180397 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/475f664a-4416-4dad-bc1f-303b3a9e40e2-var-run" (OuterVolumeSpecName: "var-run") pod "475f664a-4416-4dad-bc1f-303b3a9e40e2" (UID: "475f664a-4416-4dad-bc1f-303b3a9e40e2"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 11:27:21 crc kubenswrapper[4923]: I1128 11:27:21.180434 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/475f664a-4416-4dad-bc1f-303b3a9e40e2-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "475f664a-4416-4dad-bc1f-303b3a9e40e2" (UID: "475f664a-4416-4dad-bc1f-303b3a9e40e2"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 11:27:21 crc kubenswrapper[4923]: I1128 11:27:21.180812 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/475f664a-4416-4dad-bc1f-303b3a9e40e2-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "475f664a-4416-4dad-bc1f-303b3a9e40e2" (UID: "475f664a-4416-4dad-bc1f-303b3a9e40e2"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 11:27:21 crc kubenswrapper[4923]: I1128 11:27:21.181054 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/475f664a-4416-4dad-bc1f-303b3a9e40e2-scripts" (OuterVolumeSpecName: "scripts") pod "475f664a-4416-4dad-bc1f-303b3a9e40e2" (UID: "475f664a-4416-4dad-bc1f-303b3a9e40e2"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 11:27:21 crc kubenswrapper[4923]: I1128 11:27:21.186424 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/475f664a-4416-4dad-bc1f-303b3a9e40e2-kube-api-access-89bs8" (OuterVolumeSpecName: "kube-api-access-89bs8") pod "475f664a-4416-4dad-bc1f-303b3a9e40e2" (UID: "475f664a-4416-4dad-bc1f-303b3a9e40e2"). InnerVolumeSpecName "kube-api-access-89bs8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 11:27:21 crc kubenswrapper[4923]: I1128 11:27:21.273065 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-67x9j-config-nmht2"]
Nov 28 11:27:21 crc kubenswrapper[4923]: I1128 11:27:21.282624 4923 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/475f664a-4416-4dad-bc1f-303b3a9e40e2-additional-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 11:27:21 crc kubenswrapper[4923]: I1128 11:27:21.282746 4923 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/475f664a-4416-4dad-bc1f-303b3a9e40e2-var-run\") on node \"crc\" DevicePath \"\""
Nov 28 11:27:21 crc kubenswrapper[4923]: I1128 11:27:21.282823 4923 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/475f664a-4416-4dad-bc1f-303b3a9e40e2-var-log-ovn\") on node \"crc\" DevicePath \"\""
Nov 28 11:27:21 crc kubenswrapper[4923]: I1128 11:27:21.282888 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-89bs8\" (UniqueName: \"kubernetes.io/projected/475f664a-4416-4dad-bc1f-303b3a9e40e2-kube-api-access-89bs8\") on node \"crc\" DevicePath \"\""
Nov 28 11:27:21 crc kubenswrapper[4923]: I1128 11:27:21.282974 4923 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/475f664a-4416-4dad-bc1f-303b3a9e40e2-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 11:27:21 crc kubenswrapper[4923]: I1128 11:27:21.283029 4923 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/475f664a-4416-4dad-bc1f-303b3a9e40e2-var-run-ovn\") on node \"crc\" DevicePath \"\""
Nov 28 11:27:21 crc kubenswrapper[4923]: I1128 11:27:21.298555 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-67x9j-config-nmht2"]
Nov 28 11:27:21 crc kubenswrapper[4923]: I1128 11:27:21.394792 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-67x9j"
Nov 28 11:27:21 crc kubenswrapper[4923]: I1128 11:27:21.700984 4923 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="39d879f904c66a911384dd6e6b52d2c4a8a0cfac72c1369dccec6e14dd8e0c1a"
Nov 28 11:27:21 crc kubenswrapper[4923]: I1128 11:27:21.701089 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-67x9j-config-nmht2"
Nov 28 11:27:23 crc kubenswrapper[4923]: I1128 11:27:23.178359 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="475f664a-4416-4dad-bc1f-303b3a9e40e2" path="/var/lib/kubelet/pods/475f664a-4416-4dad-bc1f-303b3a9e40e2/volumes"
Nov 28 11:27:26 crc kubenswrapper[4923]: I1128 11:27:26.749662 4923 generic.go:334] "Generic (PLEG): container finished" podID="d575e6ab-598e-4f0d-b33a-60515d1e8d21" containerID="ec27bbb8d3db358884501b5303609ad0fbe631169d5b05fb5f24743f537863cf" exitCode=0
Nov 28 11:27:26 crc kubenswrapper[4923]: I1128 11:27:26.749886 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-bdtml" event={"ID":"d575e6ab-598e-4f0d-b33a-60515d1e8d21","Type":"ContainerDied","Data":"ec27bbb8d3db358884501b5303609ad0fbe631169d5b05fb5f24743f537863cf"}
Nov 28 11:27:28 crc kubenswrapper[4923]: I1128 11:27:28.010185 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0"
Nov 28 11:27:28 crc kubenswrapper[4923]: I1128 11:27:28.248462 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-bdtml"
Nov 28 11:27:28 crc kubenswrapper[4923]: I1128 11:27:28.388346 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-hzc9c"]
Nov 28 11:27:28 crc kubenswrapper[4923]: E1128 11:27:28.388661 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d575e6ab-598e-4f0d-b33a-60515d1e8d21" containerName="glance-db-sync"
Nov 28 11:27:28 crc kubenswrapper[4923]: I1128 11:27:28.388674 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="d575e6ab-598e-4f0d-b33a-60515d1e8d21" containerName="glance-db-sync"
Nov 28 11:27:28 crc kubenswrapper[4923]: E1128 11:27:28.388686 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="475f664a-4416-4dad-bc1f-303b3a9e40e2" containerName="ovn-config"
Nov 28 11:27:28 crc kubenswrapper[4923]: I1128 11:27:28.388692 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="475f664a-4416-4dad-bc1f-303b3a9e40e2" containerName="ovn-config"
Nov 28 11:27:28 crc kubenswrapper[4923]: I1128 11:27:28.388844 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="d575e6ab-598e-4f0d-b33a-60515d1e8d21" containerName="glance-db-sync"
Nov 28 11:27:28 crc kubenswrapper[4923]: I1128 11:27:28.388862 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="475f664a-4416-4dad-bc1f-303b3a9e40e2" containerName="ovn-config"
Nov 28 11:27:28 crc kubenswrapper[4923]: I1128 11:27:28.389349 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-hzc9c"
Nov 28 11:27:28 crc kubenswrapper[4923]: I1128 11:27:28.407213 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d575e6ab-598e-4f0d-b33a-60515d1e8d21-config-data\") pod \"d575e6ab-598e-4f0d-b33a-60515d1e8d21\" (UID: \"d575e6ab-598e-4f0d-b33a-60515d1e8d21\") "
Nov 28 11:27:28 crc kubenswrapper[4923]: I1128 11:27:28.407259 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2clzt\" (UniqueName: \"kubernetes.io/projected/d575e6ab-598e-4f0d-b33a-60515d1e8d21-kube-api-access-2clzt\") pod \"d575e6ab-598e-4f0d-b33a-60515d1e8d21\" (UID: \"d575e6ab-598e-4f0d-b33a-60515d1e8d21\") "
Nov 28 11:27:28 crc kubenswrapper[4923]: I1128 11:27:28.407326 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/d575e6ab-598e-4f0d-b33a-60515d1e8d21-db-sync-config-data\") pod \"d575e6ab-598e-4f0d-b33a-60515d1e8d21\" (UID: \"d575e6ab-598e-4f0d-b33a-60515d1e8d21\") "
Nov 28 11:27:28 crc kubenswrapper[4923]: I1128 11:27:28.407351 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d575e6ab-598e-4f0d-b33a-60515d1e8d21-combined-ca-bundle\") pod \"d575e6ab-598e-4f0d-b33a-60515d1e8d21\" (UID: \"d575e6ab-598e-4f0d-b33a-60515d1e8d21\") "
Nov 28 11:27:28 crc kubenswrapper[4923]: I1128 11:27:28.408832 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-hzc9c"]
Nov 28 11:27:28 crc kubenswrapper[4923]: I1128 11:27:28.431093 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d575e6ab-598e-4f0d-b33a-60515d1e8d21-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "d575e6ab-598e-4f0d-b33a-60515d1e8d21" (UID: "d575e6ab-598e-4f0d-b33a-60515d1e8d21"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 11:27:28 crc kubenswrapper[4923]: I1128 11:27:28.444694 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d575e6ab-598e-4f0d-b33a-60515d1e8d21-kube-api-access-2clzt" (OuterVolumeSpecName: "kube-api-access-2clzt") pod "d575e6ab-598e-4f0d-b33a-60515d1e8d21" (UID: "d575e6ab-598e-4f0d-b33a-60515d1e8d21"). InnerVolumeSpecName "kube-api-access-2clzt". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 11:27:28 crc kubenswrapper[4923]: I1128 11:27:28.463092 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d575e6ab-598e-4f0d-b33a-60515d1e8d21-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d575e6ab-598e-4f0d-b33a-60515d1e8d21" (UID: "d575e6ab-598e-4f0d-b33a-60515d1e8d21"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 11:27:28 crc kubenswrapper[4923]: I1128 11:27:28.479618 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-kln9b"]
Nov 28 11:27:28 crc kubenswrapper[4923]: I1128 11:27:28.480639 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-kln9b"
Nov 28 11:27:28 crc kubenswrapper[4923]: I1128 11:27:28.508849 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c24sg\" (UniqueName: \"kubernetes.io/projected/fa707c0f-6bdb-4597-b001-d457323f04c1-kube-api-access-c24sg\") pod \"cinder-db-create-hzc9c\" (UID: \"fa707c0f-6bdb-4597-b001-d457323f04c1\") " pod="openstack/cinder-db-create-hzc9c"
Nov 28 11:27:28 crc kubenswrapper[4923]: I1128 11:27:28.508904 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fa707c0f-6bdb-4597-b001-d457323f04c1-operator-scripts\") pod \"cinder-db-create-hzc9c\" (UID: \"fa707c0f-6bdb-4597-b001-d457323f04c1\") " pod="openstack/cinder-db-create-hzc9c"
Nov 28 11:27:28 crc kubenswrapper[4923]: I1128 11:27:28.508967 4923 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/d575e6ab-598e-4f0d-b33a-60515d1e8d21-db-sync-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 11:27:28 crc kubenswrapper[4923]: I1128 11:27:28.508979 4923 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d575e6ab-598e-4f0d-b33a-60515d1e8d21-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 11:27:28 crc kubenswrapper[4923]: I1128 11:27:28.508990 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2clzt\" (UniqueName: \"kubernetes.io/projected/d575e6ab-598e-4f0d-b33a-60515d1e8d21-kube-api-access-2clzt\") on node \"crc\" DevicePath \"\""
Nov 28 11:27:28 crc kubenswrapper[4923]: I1128 11:27:28.512442 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-kln9b"]
Nov 28 11:27:28 crc kubenswrapper[4923]: I1128 11:27:28.515103 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d575e6ab-598e-4f0d-b33a-60515d1e8d21-config-data" (OuterVolumeSpecName: "config-data") pod "d575e6ab-598e-4f0d-b33a-60515d1e8d21" (UID: "d575e6ab-598e-4f0d-b33a-60515d1e8d21"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 11:27:28 crc kubenswrapper[4923]: I1128 11:27:28.569226 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-e24f-account-create-update-72bgf"]
Nov 28 11:27:28 crc kubenswrapper[4923]: I1128 11:27:28.570083 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-e24f-account-create-update-72bgf"
Nov 28 11:27:28 crc kubenswrapper[4923]: I1128 11:27:28.572555 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret"
Nov 28 11:27:28 crc kubenswrapper[4923]: I1128 11:27:28.592893 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-e24f-account-create-update-72bgf"]
Nov 28 11:27:28 crc kubenswrapper[4923]: I1128 11:27:28.616318 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c24sg\" (UniqueName: \"kubernetes.io/projected/fa707c0f-6bdb-4597-b001-d457323f04c1-kube-api-access-c24sg\") pod \"cinder-db-create-hzc9c\" (UID: \"fa707c0f-6bdb-4597-b001-d457323f04c1\") " pod="openstack/cinder-db-create-hzc9c"
Nov 28 11:27:28 crc kubenswrapper[4923]: I1128 11:27:28.616393 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fa707c0f-6bdb-4597-b001-d457323f04c1-operator-scripts\") pod \"cinder-db-create-hzc9c\" (UID: \"fa707c0f-6bdb-4597-b001-d457323f04c1\") " pod="openstack/cinder-db-create-hzc9c"
Nov 28 11:27:28 crc kubenswrapper[4923]: I1128 11:27:28.616464 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bc24e28a-d913-47b0-a352-9962063ffedf-operator-scripts\") pod \"barbican-db-create-kln9b\" (UID: \"bc24e28a-d913-47b0-a352-9962063ffedf\") " pod="openstack/barbican-db-create-kln9b"
Nov 28 11:27:28 crc kubenswrapper[4923]: I1128 11:27:28.616501 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cbp5k\" (UniqueName: \"kubernetes.io/projected/bc24e28a-d913-47b0-a352-9962063ffedf-kube-api-access-cbp5k\") pod \"barbican-db-create-kln9b\" (UID: \"bc24e28a-d913-47b0-a352-9962063ffedf\") " pod="openstack/barbican-db-create-kln9b"
Nov 28 11:27:28 crc kubenswrapper[4923]: I1128 11:27:28.616572 4923 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d575e6ab-598e-4f0d-b33a-60515d1e8d21-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 11:27:28 crc kubenswrapper[4923]: I1128 11:27:28.617511 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fa707c0f-6bdb-4597-b001-d457323f04c1-operator-scripts\") pod \"cinder-db-create-hzc9c\" (UID: \"fa707c0f-6bdb-4597-b001-d457323f04c1\") " pod="openstack/cinder-db-create-hzc9c"
Nov 28 11:27:28 crc kubenswrapper[4923]: I1128 11:27:28.658967 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c24sg\" (UniqueName: \"kubernetes.io/projected/fa707c0f-6bdb-4597-b001-d457323f04c1-kube-api-access-c24sg\") pod \"cinder-db-create-hzc9c\" (UID: \"fa707c0f-6bdb-4597-b001-d457323f04c1\") " pod="openstack/cinder-db-create-hzc9c"
Nov 28 11:27:28 crc kubenswrapper[4923]: I1128 11:27:28.676969 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-4cc8-account-create-update-xwrmh"]
Nov 28 11:27:28 crc kubenswrapper[4923]: I1128 11:27:28.677874 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-4cc8-account-create-update-xwrmh"
Nov 28 11:27:28 crc kubenswrapper[4923]: I1128 11:27:28.697608 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret"
Nov 28 11:27:28 crc kubenswrapper[4923]: I1128 11:27:28.703898 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-qfdbm"]
Nov 28 11:27:28 crc kubenswrapper[4923]: I1128 11:27:28.705028 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-qfdbm"
Nov 28 11:27:28 crc kubenswrapper[4923]: I1128 11:27:28.708782 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-4cc8-account-create-update-xwrmh"]
Nov 28 11:27:28 crc kubenswrapper[4923]: I1128 11:27:28.709919 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-hzc9c"
Nov 28 11:27:28 crc kubenswrapper[4923]: I1128 11:27:28.711353 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone"
Nov 28 11:27:28 crc kubenswrapper[4923]: I1128 11:27:28.711498 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts"
Nov 28 11:27:28 crc kubenswrapper[4923]: I1128 11:27:28.711611 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data"
Nov 28 11:27:28 crc kubenswrapper[4923]: I1128 11:27:28.712593 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-4f87p"
Nov 28 11:27:28 crc kubenswrapper[4923]: I1128 11:27:28.723228 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-qfdbm"]
Nov 28 11:27:28 crc kubenswrapper[4923]: I1128 11:27:28.756828 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xqbjz\" (UniqueName: \"kubernetes.io/projected/68243bc7-2ab3-4632-9ff1-d3af61a0acb3-kube-api-access-xqbjz\") pod \"barbican-e24f-account-create-update-72bgf\" (UID: \"68243bc7-2ab3-4632-9ff1-d3af61a0acb3\") " pod="openstack/barbican-e24f-account-create-update-72bgf"
Nov 28 11:27:28 crc kubenswrapper[4923]: I1128 11:27:28.756970 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/68243bc7-2ab3-4632-9ff1-d3af61a0acb3-operator-scripts\") pod \"barbican-e24f-account-create-update-72bgf\" (UID: \"68243bc7-2ab3-4632-9ff1-d3af61a0acb3\") " pod="openstack/barbican-e24f-account-create-update-72bgf"
Nov 28 11:27:28 crc kubenswrapper[4923]: I1128 11:27:28.757007 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bc24e28a-d913-47b0-a352-9962063ffedf-operator-scripts\") pod \"barbican-db-create-kln9b\" (UID: \"bc24e28a-d913-47b0-a352-9962063ffedf\") " pod="openstack/barbican-db-create-kln9b"
Nov 28 11:27:28 crc kubenswrapper[4923]: I1128 11:27:28.757035 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cbp5k\" (UniqueName: \"kubernetes.io/projected/bc24e28a-d913-47b0-a352-9962063ffedf-kube-api-access-cbp5k\") pod \"barbican-db-create-kln9b\" (UID: \"bc24e28a-d913-47b0-a352-9962063ffedf\") " pod="openstack/barbican-db-create-kln9b"
Nov 28 11:27:28 crc kubenswrapper[4923]: I1128 11:27:28.757952 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bc24e28a-d913-47b0-a352-9962063ffedf-operator-scripts\") pod \"barbican-db-create-kln9b\" (UID: \"bc24e28a-d913-47b0-a352-9962063ffedf\") " pod="openstack/barbican-db-create-kln9b"
Nov 28 11:27:28 crc kubenswrapper[4923]: I1128 11:27:28.769295 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-bdtml" event={"ID":"d575e6ab-598e-4f0d-b33a-60515d1e8d21","Type":"ContainerDied","Data":"defc1df3990cb85c4091e4066c1d376b570e8f3de9788bd8b1f9e879aa3321c2"}
Nov 28 11:27:28 crc kubenswrapper[4923]: I1128 11:27:28.769421 4923 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="defc1df3990cb85c4091e4066c1d376b570e8f3de9788bd8b1f9e879aa3321c2"
Nov 28 11:27:28 crc kubenswrapper[4923]: I1128 11:27:28.769539 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-bdtml"
Nov 28 11:27:28 crc kubenswrapper[4923]: I1128 11:27:28.816205 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cbp5k\" (UniqueName: \"kubernetes.io/projected/bc24e28a-d913-47b0-a352-9962063ffedf-kube-api-access-cbp5k\") pod \"barbican-db-create-kln9b\" (UID: \"bc24e28a-d913-47b0-a352-9962063ffedf\") " pod="openstack/barbican-db-create-kln9b"
Nov 28 11:27:28 crc kubenswrapper[4923]: I1128 11:27:28.856817 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-kln9b"
Nov 28 11:27:28 crc kubenswrapper[4923]: I1128 11:27:28.858050 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g8x59\" (UniqueName: \"kubernetes.io/projected/9fca30b0-b933-4526-9006-e477a86836a6-kube-api-access-g8x59\") pod \"keystone-db-sync-qfdbm\" (UID: \"9fca30b0-b933-4526-9006-e477a86836a6\") " pod="openstack/keystone-db-sync-qfdbm"
Nov 28 11:27:28 crc kubenswrapper[4923]: I1128 11:27:28.858116 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9fca30b0-b933-4526-9006-e477a86836a6-config-data\") pod \"keystone-db-sync-qfdbm\" (UID: \"9fca30b0-b933-4526-9006-e477a86836a6\") " pod="openstack/keystone-db-sync-qfdbm"
Nov 28 11:27:28 crc kubenswrapper[4923]: I1128 11:27:28.858146 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ldj85\" (UniqueName: \"kubernetes.io/projected/5a5b5ef2-d0c3-4cee-ae31-3a4d74171b68-kube-api-access-ldj85\") pod \"cinder-4cc8-account-create-update-xwrmh\" (UID: \"5a5b5ef2-d0c3-4cee-ae31-3a4d74171b68\") " pod="openstack/cinder-4cc8-account-create-update-xwrmh"
Nov 28 11:27:28 crc kubenswrapper[4923]: I1128 11:27:28.858187 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/68243bc7-2ab3-4632-9ff1-d3af61a0acb3-operator-scripts\") pod \"barbican-e24f-account-create-update-72bgf\" (UID: \"68243bc7-2ab3-4632-9ff1-d3af61a0acb3\") " pod="openstack/barbican-e24f-account-create-update-72bgf"
Nov 28 11:27:28 crc kubenswrapper[4923]: I1128 11:27:28.858247 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5a5b5ef2-d0c3-4cee-ae31-3a4d74171b68-operator-scripts\") pod \"cinder-4cc8-account-create-update-xwrmh\" (UID: \"5a5b5ef2-d0c3-4cee-ae31-3a4d74171b68\") " pod="openstack/cinder-4cc8-account-create-update-xwrmh"
Nov 28 11:27:28 crc kubenswrapper[4923]: I1128 11:27:28.858274 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xqbjz\" (UniqueName: \"kubernetes.io/projected/68243bc7-2ab3-4632-9ff1-d3af61a0acb3-kube-api-access-xqbjz\") pod \"barbican-e24f-account-create-update-72bgf\" (UID: \"68243bc7-2ab3-4632-9ff1-d3af61a0acb3\") " pod="openstack/barbican-e24f-account-create-update-72bgf"
Nov 28 11:27:28 crc kubenswrapper[4923]: I1128 11:27:28.858291 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9fca30b0-b933-4526-9006-e477a86836a6-combined-ca-bundle\") pod \"keystone-db-sync-qfdbm\" (UID: \"9fca30b0-b933-4526-9006-e477a86836a6\") " pod="openstack/keystone-db-sync-qfdbm"
Nov 28 11:27:28 crc kubenswrapper[4923]: I1128 11:27:28.858953 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/68243bc7-2ab3-4632-9ff1-d3af61a0acb3-operator-scripts\") pod \"barbican-e24f-account-create-update-72bgf\" (UID: \"68243bc7-2ab3-4632-9ff1-d3af61a0acb3\") " pod="openstack/barbican-e24f-account-create-update-72bgf"
Nov 28 11:27:28 crc kubenswrapper[4923]: I1128 11:27:28.862702 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-gtjhb"]
Nov 28 11:27:28 crc kubenswrapper[4923]: I1128 11:27:28.863670 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-gtjhb"
Nov 28 11:27:28 crc kubenswrapper[4923]: I1128 11:27:28.882483 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-gtjhb"]
Nov 28 11:27:28 crc kubenswrapper[4923]: I1128 11:27:28.902354 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xqbjz\" (UniqueName: \"kubernetes.io/projected/68243bc7-2ab3-4632-9ff1-d3af61a0acb3-kube-api-access-xqbjz\") pod \"barbican-e24f-account-create-update-72bgf\" (UID: \"68243bc7-2ab3-4632-9ff1-d3af61a0acb3\") " pod="openstack/barbican-e24f-account-create-update-72bgf"
Nov 28 11:27:28 crc kubenswrapper[4923]: I1128 11:27:28.960661 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g8x59\" (UniqueName: \"kubernetes.io/projected/9fca30b0-b933-4526-9006-e477a86836a6-kube-api-access-g8x59\") pod \"keystone-db-sync-qfdbm\" (UID: \"9fca30b0-b933-4526-9006-e477a86836a6\") " pod="openstack/keystone-db-sync-qfdbm"
Nov 28 11:27:28 crc kubenswrapper[4923]: I1128 11:27:28.960722 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9fca30b0-b933-4526-9006-e477a86836a6-config-data\") pod \"keystone-db-sync-qfdbm\" (UID: \"9fca30b0-b933-4526-9006-e477a86836a6\") " pod="openstack/keystone-db-sync-qfdbm"
Nov 28 11:27:28 crc kubenswrapper[4923]: I1128 11:27:28.960748 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ldj85\" (UniqueName: \"kubernetes.io/projected/5a5b5ef2-d0c3-4cee-ae31-3a4d74171b68-kube-api-access-ldj85\") pod \"cinder-4cc8-account-create-update-xwrmh\" (UID: \"5a5b5ef2-d0c3-4cee-ae31-3a4d74171b68\") " pod="openstack/cinder-4cc8-account-create-update-xwrmh"
Nov 28 11:27:28 crc kubenswrapper[4923]: I1128 11:27:28.960805 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5a5b5ef2-d0c3-4cee-ae31-3a4d74171b68-operator-scripts\") pod \"cinder-4cc8-account-create-update-xwrmh\" (UID: \"5a5b5ef2-d0c3-4cee-ae31-3a4d74171b68\") " pod="openstack/cinder-4cc8-account-create-update-xwrmh"
Nov 28 11:27:28 crc kubenswrapper[4923]: I1128 11:27:28.960827 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9fca30b0-b933-4526-9006-e477a86836a6-combined-ca-bundle\") pod \"keystone-db-sync-qfdbm\" (UID: \"9fca30b0-b933-4526-9006-e477a86836a6\") " pod="openstack/keystone-db-sync-qfdbm"
Nov 28 11:27:28 crc kubenswrapper[4923]: I1128 11:27:28.963959 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5a5b5ef2-d0c3-4cee-ae31-3a4d74171b68-operator-scripts\") pod \"cinder-4cc8-account-create-update-xwrmh\" (UID: \"5a5b5ef2-d0c3-4cee-ae31-3a4d74171b68\") " pod="openstack/cinder-4cc8-account-create-update-xwrmh"
Nov 28 11:27:28 crc kubenswrapper[4923]: I1128 11:27:28.966576 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9fca30b0-b933-4526-9006-e477a86836a6-config-data\") pod \"keystone-db-sync-qfdbm\" (UID: \"9fca30b0-b933-4526-9006-e477a86836a6\") " pod="openstack/keystone-db-sync-qfdbm"
Nov 28 11:27:28 crc kubenswrapper[4923]: I1128 11:27:28.966804 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9fca30b0-b933-4526-9006-e477a86836a6-combined-ca-bundle\") pod \"keystone-db-sync-qfdbm\" (UID: \"9fca30b0-b933-4526-9006-e477a86836a6\") " pod="openstack/keystone-db-sync-qfdbm"
Nov 28 11:27:28 crc kubenswrapper[4923]: I1128 11:27:28.976283 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-278d-account-create-update-7ws6f"]
Nov 28 11:27:28 crc kubenswrapper[4923]: I1128 11:27:28.977325 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-278d-account-create-update-7ws6f" Nov 28 11:27:28 crc kubenswrapper[4923]: I1128 11:27:28.984260 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret" Nov 28 11:27:28 crc kubenswrapper[4923]: I1128 11:27:28.987224 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-278d-account-create-update-7ws6f"] Nov 28 11:27:29 crc kubenswrapper[4923]: I1128 11:27:29.011861 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g8x59\" (UniqueName: \"kubernetes.io/projected/9fca30b0-b933-4526-9006-e477a86836a6-kube-api-access-g8x59\") pod \"keystone-db-sync-qfdbm\" (UID: \"9fca30b0-b933-4526-9006-e477a86836a6\") " pod="openstack/keystone-db-sync-qfdbm" Nov 28 11:27:29 crc kubenswrapper[4923]: I1128 11:27:29.024617 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ldj85\" (UniqueName: \"kubernetes.io/projected/5a5b5ef2-d0c3-4cee-ae31-3a4d74171b68-kube-api-access-ldj85\") pod \"cinder-4cc8-account-create-update-xwrmh\" (UID: \"5a5b5ef2-d0c3-4cee-ae31-3a4d74171b68\") " pod="openstack/cinder-4cc8-account-create-update-xwrmh" Nov 28 11:27:29 crc kubenswrapper[4923]: I1128 11:27:29.062523 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5r6gg\" (UniqueName: \"kubernetes.io/projected/856d0fb6-7167-4553-b626-aaa75d43f5ab-kube-api-access-5r6gg\") pod \"neutron-db-create-gtjhb\" (UID: \"856d0fb6-7167-4553-b626-aaa75d43f5ab\") " pod="openstack/neutron-db-create-gtjhb" Nov 28 11:27:29 crc kubenswrapper[4923]: I1128 11:27:29.062573 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/856d0fb6-7167-4553-b626-aaa75d43f5ab-operator-scripts\") pod \"neutron-db-create-gtjhb\" (UID: \"856d0fb6-7167-4553-b626-aaa75d43f5ab\") " pod="openstack/neutron-db-create-gtjhb" Nov 28 11:27:29 crc kubenswrapper[4923]: I1128 11:27:29.077967 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-qfdbm" Nov 28 11:27:29 crc kubenswrapper[4923]: I1128 11:27:29.165518 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/06f81fa3-fcd1-4f4d-9dbe-fe659371c477-operator-scripts\") pod \"neutron-278d-account-create-update-7ws6f\" (UID: \"06f81fa3-fcd1-4f4d-9dbe-fe659371c477\") " pod="openstack/neutron-278d-account-create-update-7ws6f" Nov 28 11:27:29 crc kubenswrapper[4923]: I1128 11:27:29.165640 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5r6gg\" (UniqueName: \"kubernetes.io/projected/856d0fb6-7167-4553-b626-aaa75d43f5ab-kube-api-access-5r6gg\") pod \"neutron-db-create-gtjhb\" (UID: \"856d0fb6-7167-4553-b626-aaa75d43f5ab\") " pod="openstack/neutron-db-create-gtjhb" Nov 28 11:27:29 crc kubenswrapper[4923]: I1128 11:27:29.165879 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/856d0fb6-7167-4553-b626-aaa75d43f5ab-operator-scripts\") pod \"neutron-db-create-gtjhb\" (UID: \"856d0fb6-7167-4553-b626-aaa75d43f5ab\") " pod="openstack/neutron-db-create-gtjhb" Nov 28 11:27:29 crc kubenswrapper[4923]: I1128 11:27:29.165958 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ckgqq\" (UniqueName: \"kubernetes.io/projected/06f81fa3-fcd1-4f4d-9dbe-fe659371c477-kube-api-access-ckgqq\") pod \"neutron-278d-account-create-update-7ws6f\" (UID: \"06f81fa3-fcd1-4f4d-9dbe-fe659371c477\") " pod="openstack/neutron-278d-account-create-update-7ws6f" Nov 28 11:27:29 crc kubenswrapper[4923]: I1128 11:27:29.166732 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/856d0fb6-7167-4553-b626-aaa75d43f5ab-operator-scripts\") pod \"neutron-db-create-gtjhb\" (UID: \"856d0fb6-7167-4553-b626-aaa75d43f5ab\") " pod="openstack/neutron-db-create-gtjhb" Nov 28 11:27:29 crc kubenswrapper[4923]: I1128 11:27:29.189687 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-e24f-account-create-update-72bgf" Nov 28 11:27:29 crc kubenswrapper[4923]: I1128 11:27:29.211495 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5r6gg\" (UniqueName: \"kubernetes.io/projected/856d0fb6-7167-4553-b626-aaa75d43f5ab-kube-api-access-5r6gg\") pod \"neutron-db-create-gtjhb\" (UID: \"856d0fb6-7167-4553-b626-aaa75d43f5ab\") " pod="openstack/neutron-db-create-gtjhb" Nov 28 11:27:29 crc kubenswrapper[4923]: I1128 11:27:29.266722 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ckgqq\" (UniqueName: \"kubernetes.io/projected/06f81fa3-fcd1-4f4d-9dbe-fe659371c477-kube-api-access-ckgqq\") pod \"neutron-278d-account-create-update-7ws6f\" (UID: \"06f81fa3-fcd1-4f4d-9dbe-fe659371c477\") " pod="openstack/neutron-278d-account-create-update-7ws6f" Nov 28 11:27:29 crc kubenswrapper[4923]: I1128 11:27:29.266776 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/06f81fa3-fcd1-4f4d-9dbe-fe659371c477-operator-scripts\") pod \"neutron-278d-account-create-update-7ws6f\" (UID: \"06f81fa3-fcd1-4f4d-9dbe-fe659371c477\") " pod="openstack/neutron-278d-account-create-update-7ws6f" Nov 28 11:27:29 crc kubenswrapper[4923]: I1128 11:27:29.267728 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/06f81fa3-fcd1-4f4d-9dbe-fe659371c477-operator-scripts\") pod \"neutron-278d-account-create-update-7ws6f\" (UID: \"06f81fa3-fcd1-4f4d-9dbe-fe659371c477\") " pod="openstack/neutron-278d-account-create-update-7ws6f" Nov 28 11:27:29 crc kubenswrapper[4923]: I1128 11:27:29.293251 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-4cc8-account-create-update-xwrmh" Nov 28 11:27:29 crc kubenswrapper[4923]: I1128 11:27:29.302447 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-54f9b7b8d9-d9ssw"] Nov 28 11:27:29 crc kubenswrapper[4923]: I1128 11:27:29.308741 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-54f9b7b8d9-d9ssw" Nov 28 11:27:29 crc kubenswrapper[4923]: I1128 11:27:29.346534 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ckgqq\" (UniqueName: \"kubernetes.io/projected/06f81fa3-fcd1-4f4d-9dbe-fe659371c477-kube-api-access-ckgqq\") pod \"neutron-278d-account-create-update-7ws6f\" (UID: \"06f81fa3-fcd1-4f4d-9dbe-fe659371c477\") " pod="openstack/neutron-278d-account-create-update-7ws6f" Nov 28 11:27:29 crc kubenswrapper[4923]: I1128 11:27:29.365717 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-54f9b7b8d9-d9ssw"] Nov 28 11:27:29 crc kubenswrapper[4923]: I1128 11:27:29.368461 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/136fe185-8544-42b9-92df-b0c42d04a4fb-dns-svc\") pod \"dnsmasq-dns-54f9b7b8d9-d9ssw\" (UID: \"136fe185-8544-42b9-92df-b0c42d04a4fb\") " pod="openstack/dnsmasq-dns-54f9b7b8d9-d9ssw" Nov 28 11:27:29 crc kubenswrapper[4923]: I1128 11:27:29.368533 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/136fe185-8544-42b9-92df-b0c42d04a4fb-config\") pod \"dnsmasq-dns-54f9b7b8d9-d9ssw\" (UID: \"136fe185-8544-42b9-92df-b0c42d04a4fb\") " pod="openstack/dnsmasq-dns-54f9b7b8d9-d9ssw" Nov 28 11:27:29 crc kubenswrapper[4923]: I1128 11:27:29.368562 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/136fe185-8544-42b9-92df-b0c42d04a4fb-ovsdbserver-sb\") pod \"dnsmasq-dns-54f9b7b8d9-d9ssw\" (UID: \"136fe185-8544-42b9-92df-b0c42d04a4fb\") " pod="openstack/dnsmasq-dns-54f9b7b8d9-d9ssw" Nov 28 11:27:29 crc kubenswrapper[4923]: I1128 11:27:29.368653 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dll2f\" (UniqueName: \"kubernetes.io/projected/136fe185-8544-42b9-92df-b0c42d04a4fb-kube-api-access-dll2f\") pod \"dnsmasq-dns-54f9b7b8d9-d9ssw\" (UID: \"136fe185-8544-42b9-92df-b0c42d04a4fb\") " pod="openstack/dnsmasq-dns-54f9b7b8d9-d9ssw" Nov 28 11:27:29 crc kubenswrapper[4923]: I1128 11:27:29.368675 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/136fe185-8544-42b9-92df-b0c42d04a4fb-ovsdbserver-nb\") pod \"dnsmasq-dns-54f9b7b8d9-d9ssw\" (UID: \"136fe185-8544-42b9-92df-b0c42d04a4fb\") " pod="openstack/dnsmasq-dns-54f9b7b8d9-d9ssw" Nov 28 11:27:29 crc kubenswrapper[4923]: I1128 11:27:29.436674 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-hzc9c"] Nov 28 11:27:29 crc kubenswrapper[4923]: I1128 11:27:29.469768 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/136fe185-8544-42b9-92df-b0c42d04a4fb-ovsdbserver-sb\") pod \"dnsmasq-dns-54f9b7b8d9-d9ssw\" (UID: \"136fe185-8544-42b9-92df-b0c42d04a4fb\") " pod="openstack/dnsmasq-dns-54f9b7b8d9-d9ssw" Nov 28 11:27:29 crc kubenswrapper[4923]: I1128 11:27:29.469893 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dll2f\" (UniqueName: \"kubernetes.io/projected/136fe185-8544-42b9-92df-b0c42d04a4fb-kube-api-access-dll2f\") pod \"dnsmasq-dns-54f9b7b8d9-d9ssw\" (UID: 
\"136fe185-8544-42b9-92df-b0c42d04a4fb\") " pod="openstack/dnsmasq-dns-54f9b7b8d9-d9ssw" Nov 28 11:27:29 crc kubenswrapper[4923]: I1128 11:27:29.469918 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/136fe185-8544-42b9-92df-b0c42d04a4fb-ovsdbserver-nb\") pod \"dnsmasq-dns-54f9b7b8d9-d9ssw\" (UID: \"136fe185-8544-42b9-92df-b0c42d04a4fb\") " pod="openstack/dnsmasq-dns-54f9b7b8d9-d9ssw" Nov 28 11:27:29 crc kubenswrapper[4923]: I1128 11:27:29.469957 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/136fe185-8544-42b9-92df-b0c42d04a4fb-dns-svc\") pod \"dnsmasq-dns-54f9b7b8d9-d9ssw\" (UID: \"136fe185-8544-42b9-92df-b0c42d04a4fb\") " pod="openstack/dnsmasq-dns-54f9b7b8d9-d9ssw" Nov 28 11:27:29 crc kubenswrapper[4923]: I1128 11:27:29.469984 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/136fe185-8544-42b9-92df-b0c42d04a4fb-config\") pod \"dnsmasq-dns-54f9b7b8d9-d9ssw\" (UID: \"136fe185-8544-42b9-92df-b0c42d04a4fb\") " pod="openstack/dnsmasq-dns-54f9b7b8d9-d9ssw" Nov 28 11:27:29 crc kubenswrapper[4923]: I1128 11:27:29.472152 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/136fe185-8544-42b9-92df-b0c42d04a4fb-config\") pod \"dnsmasq-dns-54f9b7b8d9-d9ssw\" (UID: \"136fe185-8544-42b9-92df-b0c42d04a4fb\") " pod="openstack/dnsmasq-dns-54f9b7b8d9-d9ssw" Nov 28 11:27:29 crc kubenswrapper[4923]: I1128 11:27:29.472155 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/136fe185-8544-42b9-92df-b0c42d04a4fb-dns-svc\") pod \"dnsmasq-dns-54f9b7b8d9-d9ssw\" (UID: \"136fe185-8544-42b9-92df-b0c42d04a4fb\") " pod="openstack/dnsmasq-dns-54f9b7b8d9-d9ssw" Nov 28 11:27:29 crc kubenswrapper[4923]: I1128 11:27:29.472861 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/136fe185-8544-42b9-92df-b0c42d04a4fb-ovsdbserver-nb\") pod \"dnsmasq-dns-54f9b7b8d9-d9ssw\" (UID: \"136fe185-8544-42b9-92df-b0c42d04a4fb\") " pod="openstack/dnsmasq-dns-54f9b7b8d9-d9ssw" Nov 28 11:27:29 crc kubenswrapper[4923]: I1128 11:27:29.476997 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/136fe185-8544-42b9-92df-b0c42d04a4fb-ovsdbserver-sb\") pod \"dnsmasq-dns-54f9b7b8d9-d9ssw\" (UID: \"136fe185-8544-42b9-92df-b0c42d04a4fb\") " pod="openstack/dnsmasq-dns-54f9b7b8d9-d9ssw" Nov 28 11:27:29 crc kubenswrapper[4923]: I1128 11:27:29.477610 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-create-gtjhb" Nov 28 11:27:29 crc kubenswrapper[4923]: I1128 11:27:29.506769 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dll2f\" (UniqueName: \"kubernetes.io/projected/136fe185-8544-42b9-92df-b0c42d04a4fb-kube-api-access-dll2f\") pod \"dnsmasq-dns-54f9b7b8d9-d9ssw\" (UID: \"136fe185-8544-42b9-92df-b0c42d04a4fb\") " pod="openstack/dnsmasq-dns-54f9b7b8d9-d9ssw" Nov 28 11:27:29 crc kubenswrapper[4923]: I1128 11:27:29.582771 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-kln9b"] Nov 28 11:27:29 crc kubenswrapper[4923]: I1128 11:27:29.601827 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-278d-account-create-update-7ws6f" Nov 28 11:27:29 crc kubenswrapper[4923]: I1128 11:27:29.699331 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-54f9b7b8d9-d9ssw" Nov 28 11:27:29 crc kubenswrapper[4923]: I1128 11:27:29.782233 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-kln9b" event={"ID":"bc24e28a-d913-47b0-a352-9962063ffedf","Type":"ContainerStarted","Data":"04be77dc8aba2b4784080a129ae477459d29a3bdc5121d0381cbcde6af9f7236"} Nov 28 11:27:29 crc kubenswrapper[4923]: I1128 11:27:29.783384 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-hzc9c" event={"ID":"fa707c0f-6bdb-4597-b001-d457323f04c1","Type":"ContainerStarted","Data":"f984ab706c058be498ca66cd9c54263456756fc7cc08a8f2c8ff7d4ca754447a"} Nov 28 11:27:29 crc kubenswrapper[4923]: I1128 11:27:29.783407 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-hzc9c" event={"ID":"fa707c0f-6bdb-4597-b001-d457323f04c1","Type":"ContainerStarted","Data":"15d201e884c679772d6ae4ba30de52ef7dfa60bbbfc289896920510c8c0d19ae"} Nov 28 11:27:29 crc kubenswrapper[4923]: I1128 11:27:29.807504 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-create-hzc9c" podStartSLOduration=1.807488823 podStartE2EDuration="1.807488823s" podCreationTimestamp="2025-11-28 11:27:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:27:29.805116576 +0000 UTC m=+1128.933800786" watchObservedRunningTime="2025-11-28 11:27:29.807488823 +0000 UTC m=+1128.936173033" Nov 28 11:27:29 crc kubenswrapper[4923]: I1128 11:27:29.831543 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-qfdbm"] Nov 28 11:27:29 crc kubenswrapper[4923]: W1128 11:27:29.844149 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9fca30b0_b933_4526_9006_e477a86836a6.slice/crio-e9a5d3cec66ae4383b6688e462e004dd7851d64de579801690b4db2a89fe0d12 WatchSource:0}: Error finding container e9a5d3cec66ae4383b6688e462e004dd7851d64de579801690b4db2a89fe0d12: Status 404 returned error can't find the container with id e9a5d3cec66ae4383b6688e462e004dd7851d64de579801690b4db2a89fe0d12 Nov 28 11:27:29 crc kubenswrapper[4923]: I1128 11:27:29.915898 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-e24f-account-create-update-72bgf"] Nov 28 11:27:30 crc kubenswrapper[4923]: I1128 11:27:30.020100 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-4cc8-account-create-update-xwrmh"] Nov 28 
11:27:30 crc kubenswrapper[4923]: I1128 11:27:30.158182 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-gtjhb"] Nov 28 11:27:30 crc kubenswrapper[4923]: I1128 11:27:30.310729 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-278d-account-create-update-7ws6f"] Nov 28 11:27:30 crc kubenswrapper[4923]: I1128 11:27:30.369064 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-54f9b7b8d9-d9ssw"] Nov 28 11:27:30 crc kubenswrapper[4923]: I1128 11:27:30.796053 4923 generic.go:334] "Generic (PLEG): container finished" podID="136fe185-8544-42b9-92df-b0c42d04a4fb" containerID="0734859dc5bd26d8f5de06618e946591cec15cb880f123aff332d122a5a500dc" exitCode=0 Nov 28 11:27:30 crc kubenswrapper[4923]: I1128 11:27:30.796299 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54f9b7b8d9-d9ssw" event={"ID":"136fe185-8544-42b9-92df-b0c42d04a4fb","Type":"ContainerDied","Data":"0734859dc5bd26d8f5de06618e946591cec15cb880f123aff332d122a5a500dc"} Nov 28 11:27:30 crc kubenswrapper[4923]: I1128 11:27:30.796324 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54f9b7b8d9-d9ssw" event={"ID":"136fe185-8544-42b9-92df-b0c42d04a4fb","Type":"ContainerStarted","Data":"5bc72763e16a05cfac23ebfe71828dd0828958a725f2e80d2b299e60024b2e27"} Nov 28 11:27:30 crc kubenswrapper[4923]: I1128 11:27:30.800259 4923 generic.go:334] "Generic (PLEG): container finished" podID="5a5b5ef2-d0c3-4cee-ae31-3a4d74171b68" containerID="5c8d3a4a6e04a1023fcfdf085467d3bb970d8858f0760117b0a68a45115cc41d" exitCode=0 Nov 28 11:27:30 crc kubenswrapper[4923]: I1128 11:27:30.800314 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-4cc8-account-create-update-xwrmh" event={"ID":"5a5b5ef2-d0c3-4cee-ae31-3a4d74171b68","Type":"ContainerDied","Data":"5c8d3a4a6e04a1023fcfdf085467d3bb970d8858f0760117b0a68a45115cc41d"} Nov 28 11:27:30 crc kubenswrapper[4923]: I1128 11:27:30.800336 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-4cc8-account-create-update-xwrmh" event={"ID":"5a5b5ef2-d0c3-4cee-ae31-3a4d74171b68","Type":"ContainerStarted","Data":"6b7c652faa89f1b10a0cf2b5bddc9a2dd6f15f532688a329fad584e49feb36d6"} Nov 28 11:27:30 crc kubenswrapper[4923]: I1128 11:27:30.814698 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-278d-account-create-update-7ws6f" event={"ID":"06f81fa3-fcd1-4f4d-9dbe-fe659371c477","Type":"ContainerStarted","Data":"2c8398455971b245c9ed814839cce39984693ffe4db72ead897289a105230ce3"} Nov 28 11:27:30 crc kubenswrapper[4923]: I1128 11:27:30.814739 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-278d-account-create-update-7ws6f" event={"ID":"06f81fa3-fcd1-4f4d-9dbe-fe659371c477","Type":"ContainerStarted","Data":"229360834b03c3b8854981554d2e455ac6d9ebda6da1a402c9a5e82dd8701054"} Nov 28 11:27:30 crc kubenswrapper[4923]: I1128 11:27:30.826689 4923 generic.go:334] "Generic (PLEG): container finished" podID="bc24e28a-d913-47b0-a352-9962063ffedf" containerID="44aa867d35691dce4770d65b122763fa59ba6faa225d1ce23188f9822c30a7ed" exitCode=0 Nov 28 11:27:30 crc kubenswrapper[4923]: I1128 11:27:30.826754 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-kln9b" event={"ID":"bc24e28a-d913-47b0-a352-9962063ffedf","Type":"ContainerDied","Data":"44aa867d35691dce4770d65b122763fa59ba6faa225d1ce23188f9822c30a7ed"} Nov 28 11:27:30 crc kubenswrapper[4923]: I1128 
11:27:30.828210 4923 generic.go:334] "Generic (PLEG): container finished" podID="fa707c0f-6bdb-4597-b001-d457323f04c1" containerID="f984ab706c058be498ca66cd9c54263456756fc7cc08a8f2c8ff7d4ca754447a" exitCode=0 Nov 28 11:27:30 crc kubenswrapper[4923]: I1128 11:27:30.828282 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-hzc9c" event={"ID":"fa707c0f-6bdb-4597-b001-d457323f04c1","Type":"ContainerDied","Data":"f984ab706c058be498ca66cd9c54263456756fc7cc08a8f2c8ff7d4ca754447a"} Nov 28 11:27:30 crc kubenswrapper[4923]: I1128 11:27:30.829314 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-qfdbm" event={"ID":"9fca30b0-b933-4526-9006-e477a86836a6","Type":"ContainerStarted","Data":"e9a5d3cec66ae4383b6688e462e004dd7851d64de579801690b4db2a89fe0d12"} Nov 28 11:27:30 crc kubenswrapper[4923]: I1128 11:27:30.834769 4923 generic.go:334] "Generic (PLEG): container finished" podID="856d0fb6-7167-4553-b626-aaa75d43f5ab" containerID="00598b0cdb16553692679fc804bb9418856adaf21da948b49b3df0a6881092a8" exitCode=0 Nov 28 11:27:30 crc kubenswrapper[4923]: I1128 11:27:30.834848 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-gtjhb" event={"ID":"856d0fb6-7167-4553-b626-aaa75d43f5ab","Type":"ContainerDied","Data":"00598b0cdb16553692679fc804bb9418856adaf21da948b49b3df0a6881092a8"} Nov 28 11:27:30 crc kubenswrapper[4923]: I1128 11:27:30.834872 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-gtjhb" event={"ID":"856d0fb6-7167-4553-b626-aaa75d43f5ab","Type":"ContainerStarted","Data":"8397d184954f0ec20229cc49b9da653c09d77554038b57483fd2626f0cf81b85"} Nov 28 11:27:30 crc kubenswrapper[4923]: I1128 11:27:30.836802 4923 generic.go:334] "Generic (PLEG): container finished" podID="68243bc7-2ab3-4632-9ff1-d3af61a0acb3" containerID="93328e0abc8e779b2ab8901ca90188b416c2faa3cbfab4624a1f1142abb42572" exitCode=0 Nov 28 11:27:30 crc kubenswrapper[4923]: I1128 11:27:30.836828 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-e24f-account-create-update-72bgf" event={"ID":"68243bc7-2ab3-4632-9ff1-d3af61a0acb3","Type":"ContainerDied","Data":"93328e0abc8e779b2ab8901ca90188b416c2faa3cbfab4624a1f1142abb42572"} Nov 28 11:27:30 crc kubenswrapper[4923]: I1128 11:27:30.836842 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-e24f-account-create-update-72bgf" event={"ID":"68243bc7-2ab3-4632-9ff1-d3af61a0acb3","Type":"ContainerStarted","Data":"b8f9673fe02b04767375911af3f5758e16b29e1f1df17426dcba57106c163d96"} Nov 28 11:27:30 crc kubenswrapper[4923]: I1128 11:27:30.883377 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-278d-account-create-update-7ws6f" podStartSLOduration=2.883359726 podStartE2EDuration="2.883359726s" podCreationTimestamp="2025-11-28 11:27:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:27:30.874913147 +0000 UTC m=+1130.003597357" watchObservedRunningTime="2025-11-28 11:27:30.883359726 +0000 UTC m=+1130.012043936" Nov 28 11:27:31 crc kubenswrapper[4923]: I1128 11:27:31.845756 4923 generic.go:334] "Generic (PLEG): container finished" podID="06f81fa3-fcd1-4f4d-9dbe-fe659371c477" containerID="2c8398455971b245c9ed814839cce39984693ffe4db72ead897289a105230ce3" exitCode=0 Nov 28 11:27:31 crc kubenswrapper[4923]: I1128 11:27:31.846129 4923 kubelet.go:2453] "SyncLoop (PLEG): event 
for pod" pod="openstack/neutron-278d-account-create-update-7ws6f" event={"ID":"06f81fa3-fcd1-4f4d-9dbe-fe659371c477","Type":"ContainerDied","Data":"2c8398455971b245c9ed814839cce39984693ffe4db72ead897289a105230ce3"} Nov 28 11:27:31 crc kubenswrapper[4923]: I1128 11:27:31.849048 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54f9b7b8d9-d9ssw" event={"ID":"136fe185-8544-42b9-92df-b0c42d04a4fb","Type":"ContainerStarted","Data":"7769ff15375ac0ae74c80e592f829f4efcbe158a002c162c498c8ae893bd286e"} Nov 28 11:27:31 crc kubenswrapper[4923]: I1128 11:27:31.850042 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-54f9b7b8d9-d9ssw" Nov 28 11:27:31 crc kubenswrapper[4923]: I1128 11:27:31.889772 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-54f9b7b8d9-d9ssw" podStartSLOduration=2.889754821 podStartE2EDuration="2.889754821s" podCreationTimestamp="2025-11-28 11:27:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:27:31.887885468 +0000 UTC m=+1131.016569678" watchObservedRunningTime="2025-11-28 11:27:31.889754821 +0000 UTC m=+1131.018439031" Nov 28 11:27:32 crc kubenswrapper[4923]: I1128 11:27:32.207299 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-hzc9c" Nov 28 11:27:32 crc kubenswrapper[4923]: I1128 11:27:32.345757 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fa707c0f-6bdb-4597-b001-d457323f04c1-operator-scripts\") pod \"fa707c0f-6bdb-4597-b001-d457323f04c1\" (UID: \"fa707c0f-6bdb-4597-b001-d457323f04c1\") " Nov 28 11:27:32 crc kubenswrapper[4923]: I1128 11:27:32.345799 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c24sg\" (UniqueName: \"kubernetes.io/projected/fa707c0f-6bdb-4597-b001-d457323f04c1-kube-api-access-c24sg\") pod \"fa707c0f-6bdb-4597-b001-d457323f04c1\" (UID: \"fa707c0f-6bdb-4597-b001-d457323f04c1\") " Nov 28 11:27:32 crc kubenswrapper[4923]: I1128 11:27:32.346695 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fa707c0f-6bdb-4597-b001-d457323f04c1-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "fa707c0f-6bdb-4597-b001-d457323f04c1" (UID: "fa707c0f-6bdb-4597-b001-d457323f04c1"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:27:32 crc kubenswrapper[4923]: I1128 11:27:32.352975 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fa707c0f-6bdb-4597-b001-d457323f04c1-kube-api-access-c24sg" (OuterVolumeSpecName: "kube-api-access-c24sg") pod "fa707c0f-6bdb-4597-b001-d457323f04c1" (UID: "fa707c0f-6bdb-4597-b001-d457323f04c1"). InnerVolumeSpecName "kube-api-access-c24sg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:27:32 crc kubenswrapper[4923]: I1128 11:27:32.445699 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-kln9b" Nov 28 11:27:32 crc kubenswrapper[4923]: I1128 11:27:32.447732 4923 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fa707c0f-6bdb-4597-b001-d457323f04c1-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 11:27:32 crc kubenswrapper[4923]: I1128 11:27:32.447761 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c24sg\" (UniqueName: \"kubernetes.io/projected/fa707c0f-6bdb-4597-b001-d457323f04c1-kube-api-access-c24sg\") on node \"crc\" DevicePath \"\"" Nov 28 11:27:32 crc kubenswrapper[4923]: I1128 11:27:32.454063 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-e24f-account-create-update-72bgf" Nov 28 11:27:32 crc kubenswrapper[4923]: I1128 11:27:32.481395 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-gtjhb" Nov 28 11:27:32 crc kubenswrapper[4923]: I1128 11:27:32.490490 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-4cc8-account-create-update-xwrmh" Nov 28 11:27:32 crc kubenswrapper[4923]: I1128 11:27:32.549097 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/68243bc7-2ab3-4632-9ff1-d3af61a0acb3-operator-scripts\") pod \"68243bc7-2ab3-4632-9ff1-d3af61a0acb3\" (UID: \"68243bc7-2ab3-4632-9ff1-d3af61a0acb3\") " Nov 28 11:27:32 crc kubenswrapper[4923]: I1128 11:27:32.549144 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bc24e28a-d913-47b0-a352-9962063ffedf-operator-scripts\") pod \"bc24e28a-d913-47b0-a352-9962063ffedf\" (UID: \"bc24e28a-d913-47b0-a352-9962063ffedf\") " Nov 28 11:27:32 crc kubenswrapper[4923]: I1128 11:27:32.549167 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ldj85\" (UniqueName: \"kubernetes.io/projected/5a5b5ef2-d0c3-4cee-ae31-3a4d74171b68-kube-api-access-ldj85\") pod \"5a5b5ef2-d0c3-4cee-ae31-3a4d74171b68\" (UID: \"5a5b5ef2-d0c3-4cee-ae31-3a4d74171b68\") " Nov 28 11:27:32 crc kubenswrapper[4923]: I1128 11:27:32.549199 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5a5b5ef2-d0c3-4cee-ae31-3a4d74171b68-operator-scripts\") pod \"5a5b5ef2-d0c3-4cee-ae31-3a4d74171b68\" (UID: \"5a5b5ef2-d0c3-4cee-ae31-3a4d74171b68\") " Nov 28 11:27:32 crc kubenswrapper[4923]: I1128 11:27:32.549783 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bc24e28a-d913-47b0-a352-9962063ffedf-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "bc24e28a-d913-47b0-a352-9962063ffedf" (UID: "bc24e28a-d913-47b0-a352-9962063ffedf"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:27:32 crc kubenswrapper[4923]: I1128 11:27:32.549809 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/68243bc7-2ab3-4632-9ff1-d3af61a0acb3-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "68243bc7-2ab3-4632-9ff1-d3af61a0acb3" (UID: "68243bc7-2ab3-4632-9ff1-d3af61a0acb3"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:27:32 crc kubenswrapper[4923]: I1128 11:27:32.550146 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5a5b5ef2-d0c3-4cee-ae31-3a4d74171b68-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "5a5b5ef2-d0c3-4cee-ae31-3a4d74171b68" (UID: "5a5b5ef2-d0c3-4cee-ae31-3a4d74171b68"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:27:32 crc kubenswrapper[4923]: I1128 11:27:32.551035 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xqbjz\" (UniqueName: \"kubernetes.io/projected/68243bc7-2ab3-4632-9ff1-d3af61a0acb3-kube-api-access-xqbjz\") pod \"68243bc7-2ab3-4632-9ff1-d3af61a0acb3\" (UID: \"68243bc7-2ab3-4632-9ff1-d3af61a0acb3\") " Nov 28 11:27:32 crc kubenswrapper[4923]: I1128 11:27:32.551076 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/856d0fb6-7167-4553-b626-aaa75d43f5ab-operator-scripts\") pod \"856d0fb6-7167-4553-b626-aaa75d43f5ab\" (UID: \"856d0fb6-7167-4553-b626-aaa75d43f5ab\") " Nov 28 11:27:32 crc kubenswrapper[4923]: I1128 11:27:32.551174 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5r6gg\" (UniqueName: \"kubernetes.io/projected/856d0fb6-7167-4553-b626-aaa75d43f5ab-kube-api-access-5r6gg\") pod \"856d0fb6-7167-4553-b626-aaa75d43f5ab\" (UID: \"856d0fb6-7167-4553-b626-aaa75d43f5ab\") " Nov 28 11:27:32 crc kubenswrapper[4923]: I1128 11:27:32.551206 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cbp5k\" (UniqueName: \"kubernetes.io/projected/bc24e28a-d913-47b0-a352-9962063ffedf-kube-api-access-cbp5k\") pod \"bc24e28a-d913-47b0-a352-9962063ffedf\" (UID: \"bc24e28a-d913-47b0-a352-9962063ffedf\") " Nov 28 11:27:32 crc kubenswrapper[4923]: I1128 11:27:32.551598 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/856d0fb6-7167-4553-b626-aaa75d43f5ab-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "856d0fb6-7167-4553-b626-aaa75d43f5ab" (UID: "856d0fb6-7167-4553-b626-aaa75d43f5ab"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:27:32 crc kubenswrapper[4923]: I1128 11:27:32.551702 4923 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/68243bc7-2ab3-4632-9ff1-d3af61a0acb3-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 11:27:32 crc kubenswrapper[4923]: I1128 11:27:32.551720 4923 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bc24e28a-d913-47b0-a352-9962063ffedf-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 11:27:32 crc kubenswrapper[4923]: I1128 11:27:32.551729 4923 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5a5b5ef2-d0c3-4cee-ae31-3a4d74171b68-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 11:27:32 crc kubenswrapper[4923]: I1128 11:27:32.551739 4923 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/856d0fb6-7167-4553-b626-aaa75d43f5ab-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 11:27:32 crc kubenswrapper[4923]: I1128 11:27:32.554050 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/68243bc7-2ab3-4632-9ff1-d3af61a0acb3-kube-api-access-xqbjz" (OuterVolumeSpecName: "kube-api-access-xqbjz") pod "68243bc7-2ab3-4632-9ff1-d3af61a0acb3" (UID: "68243bc7-2ab3-4632-9ff1-d3af61a0acb3"). InnerVolumeSpecName "kube-api-access-xqbjz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:27:32 crc kubenswrapper[4923]: I1128 11:27:32.555874 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/856d0fb6-7167-4553-b626-aaa75d43f5ab-kube-api-access-5r6gg" (OuterVolumeSpecName: "kube-api-access-5r6gg") pod "856d0fb6-7167-4553-b626-aaa75d43f5ab" (UID: "856d0fb6-7167-4553-b626-aaa75d43f5ab"). InnerVolumeSpecName "kube-api-access-5r6gg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:27:32 crc kubenswrapper[4923]: I1128 11:27:32.568023 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc24e28a-d913-47b0-a352-9962063ffedf-kube-api-access-cbp5k" (OuterVolumeSpecName: "kube-api-access-cbp5k") pod "bc24e28a-d913-47b0-a352-9962063ffedf" (UID: "bc24e28a-d913-47b0-a352-9962063ffedf"). InnerVolumeSpecName "kube-api-access-cbp5k". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:27:32 crc kubenswrapper[4923]: I1128 11:27:32.579976 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5a5b5ef2-d0c3-4cee-ae31-3a4d74171b68-kube-api-access-ldj85" (OuterVolumeSpecName: "kube-api-access-ldj85") pod "5a5b5ef2-d0c3-4cee-ae31-3a4d74171b68" (UID: "5a5b5ef2-d0c3-4cee-ae31-3a4d74171b68"). InnerVolumeSpecName "kube-api-access-ldj85". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:27:32 crc kubenswrapper[4923]: I1128 11:27:32.653439 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ldj85\" (UniqueName: \"kubernetes.io/projected/5a5b5ef2-d0c3-4cee-ae31-3a4d74171b68-kube-api-access-ldj85\") on node \"crc\" DevicePath \"\"" Nov 28 11:27:32 crc kubenswrapper[4923]: I1128 11:27:32.654007 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xqbjz\" (UniqueName: \"kubernetes.io/projected/68243bc7-2ab3-4632-9ff1-d3af61a0acb3-kube-api-access-xqbjz\") on node \"crc\" DevicePath \"\"" Nov 28 11:27:32 crc kubenswrapper[4923]: I1128 11:27:32.654044 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5r6gg\" (UniqueName: \"kubernetes.io/projected/856d0fb6-7167-4553-b626-aaa75d43f5ab-kube-api-access-5r6gg\") on node \"crc\" DevicePath \"\"" Nov 28 11:27:32 crc kubenswrapper[4923]: I1128 11:27:32.654053 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cbp5k\" (UniqueName: \"kubernetes.io/projected/bc24e28a-d913-47b0-a352-9962063ffedf-kube-api-access-cbp5k\") on node \"crc\" DevicePath \"\"" Nov 28 11:27:32 crc kubenswrapper[4923]: I1128 11:27:32.858170 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-kln9b" event={"ID":"bc24e28a-d913-47b0-a352-9962063ffedf","Type":"ContainerDied","Data":"04be77dc8aba2b4784080a129ae477459d29a3bdc5121d0381cbcde6af9f7236"} Nov 28 11:27:32 crc kubenswrapper[4923]: I1128 11:27:32.858210 4923 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="04be77dc8aba2b4784080a129ae477459d29a3bdc5121d0381cbcde6af9f7236" Nov 28 11:27:32 crc kubenswrapper[4923]: I1128 11:27:32.858281 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-kln9b" Nov 28 11:27:32 crc kubenswrapper[4923]: I1128 11:27:32.866280 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-hzc9c" event={"ID":"fa707c0f-6bdb-4597-b001-d457323f04c1","Type":"ContainerDied","Data":"15d201e884c679772d6ae4ba30de52ef7dfa60bbbfc289896920510c8c0d19ae"} Nov 28 11:27:32 crc kubenswrapper[4923]: I1128 11:27:32.866319 4923 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="15d201e884c679772d6ae4ba30de52ef7dfa60bbbfc289896920510c8c0d19ae" Nov 28 11:27:32 crc kubenswrapper[4923]: I1128 11:27:32.866392 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-hzc9c" Nov 28 11:27:32 crc kubenswrapper[4923]: I1128 11:27:32.870123 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-gtjhb" event={"ID":"856d0fb6-7167-4553-b626-aaa75d43f5ab","Type":"ContainerDied","Data":"8397d184954f0ec20229cc49b9da653c09d77554038b57483fd2626f0cf81b85"} Nov 28 11:27:32 crc kubenswrapper[4923]: I1128 11:27:32.870159 4923 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8397d184954f0ec20229cc49b9da653c09d77554038b57483fd2626f0cf81b85" Nov 28 11:27:32 crc kubenswrapper[4923]: I1128 11:27:32.870219 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-create-gtjhb" Nov 28 11:27:32 crc kubenswrapper[4923]: I1128 11:27:32.873990 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-e24f-account-create-update-72bgf" event={"ID":"68243bc7-2ab3-4632-9ff1-d3af61a0acb3","Type":"ContainerDied","Data":"b8f9673fe02b04767375911af3f5758e16b29e1f1df17426dcba57106c163d96"} Nov 28 11:27:32 crc kubenswrapper[4923]: I1128 11:27:32.874013 4923 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b8f9673fe02b04767375911af3f5758e16b29e1f1df17426dcba57106c163d96" Nov 28 11:27:32 crc kubenswrapper[4923]: I1128 11:27:32.873998 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-e24f-account-create-update-72bgf" Nov 28 11:27:32 crc kubenswrapper[4923]: I1128 11:27:32.876141 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-4cc8-account-create-update-xwrmh" Nov 28 11:27:32 crc kubenswrapper[4923]: I1128 11:27:32.879135 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-4cc8-account-create-update-xwrmh" event={"ID":"5a5b5ef2-d0c3-4cee-ae31-3a4d74171b68","Type":"ContainerDied","Data":"6b7c652faa89f1b10a0cf2b5bddc9a2dd6f15f532688a329fad584e49feb36d6"} Nov 28 11:27:32 crc kubenswrapper[4923]: I1128 11:27:32.879203 4923 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6b7c652faa89f1b10a0cf2b5bddc9a2dd6f15f532688a329fad584e49feb36d6" Nov 28 11:27:35 crc kubenswrapper[4923]: I1128 11:27:35.605764 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-278d-account-create-update-7ws6f" Nov 28 11:27:35 crc kubenswrapper[4923]: I1128 11:27:35.712604 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/06f81fa3-fcd1-4f4d-9dbe-fe659371c477-operator-scripts\") pod \"06f81fa3-fcd1-4f4d-9dbe-fe659371c477\" (UID: \"06f81fa3-fcd1-4f4d-9dbe-fe659371c477\") " Nov 28 11:27:35 crc kubenswrapper[4923]: I1128 11:27:35.712719 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ckgqq\" (UniqueName: \"kubernetes.io/projected/06f81fa3-fcd1-4f4d-9dbe-fe659371c477-kube-api-access-ckgqq\") pod \"06f81fa3-fcd1-4f4d-9dbe-fe659371c477\" (UID: \"06f81fa3-fcd1-4f4d-9dbe-fe659371c477\") " Nov 28 11:27:35 crc kubenswrapper[4923]: I1128 11:27:35.713482 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/06f81fa3-fcd1-4f4d-9dbe-fe659371c477-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "06f81fa3-fcd1-4f4d-9dbe-fe659371c477" (UID: "06f81fa3-fcd1-4f4d-9dbe-fe659371c477"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:27:35 crc kubenswrapper[4923]: I1128 11:27:35.721389 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/06f81fa3-fcd1-4f4d-9dbe-fe659371c477-kube-api-access-ckgqq" (OuterVolumeSpecName: "kube-api-access-ckgqq") pod "06f81fa3-fcd1-4f4d-9dbe-fe659371c477" (UID: "06f81fa3-fcd1-4f4d-9dbe-fe659371c477"). InnerVolumeSpecName "kube-api-access-ckgqq". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:27:35 crc kubenswrapper[4923]: I1128 11:27:35.814921 4923 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/06f81fa3-fcd1-4f4d-9dbe-fe659371c477-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 11:27:35 crc kubenswrapper[4923]: I1128 11:27:35.814966 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ckgqq\" (UniqueName: \"kubernetes.io/projected/06f81fa3-fcd1-4f4d-9dbe-fe659371c477-kube-api-access-ckgqq\") on node \"crc\" DevicePath \"\"" Nov 28 11:27:35 crc kubenswrapper[4923]: I1128 11:27:35.904780 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-278d-account-create-update-7ws6f" event={"ID":"06f81fa3-fcd1-4f4d-9dbe-fe659371c477","Type":"ContainerDied","Data":"229360834b03c3b8854981554d2e455ac6d9ebda6da1a402c9a5e82dd8701054"} Nov 28 11:27:35 crc kubenswrapper[4923]: I1128 11:27:35.904815 4923 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="229360834b03c3b8854981554d2e455ac6d9ebda6da1a402c9a5e82dd8701054" Nov 28 11:27:35 crc kubenswrapper[4923]: I1128 11:27:35.904861 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-278d-account-create-update-7ws6f" Nov 28 11:27:35 crc kubenswrapper[4923]: I1128 11:27:35.913492 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-qfdbm" event={"ID":"9fca30b0-b933-4526-9006-e477a86836a6","Type":"ContainerStarted","Data":"f3490ef1976735489e353012e64ae07dfbc4a1a1bc13fce7e22fade279ae7ad1"} Nov 28 11:27:35 crc kubenswrapper[4923]: I1128 11:27:35.940470 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-qfdbm" podStartSLOduration=2.277609108 podStartE2EDuration="7.940441351s" podCreationTimestamp="2025-11-28 11:27:28 +0000 UTC" firstStartedPulling="2025-11-28 11:27:29.850355017 +0000 UTC m=+1128.979039227" lastFinishedPulling="2025-11-28 11:27:35.51318726 +0000 UTC m=+1134.641871470" observedRunningTime="2025-11-28 11:27:35.936212932 +0000 UTC m=+1135.064897142" watchObservedRunningTime="2025-11-28 11:27:35.940441351 +0000 UTC m=+1135.069125601" Nov 28 11:27:37 crc kubenswrapper[4923]: I1128 11:27:37.503302 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Nov 28 11:27:38 crc kubenswrapper[4923]: I1128 11:27:38.944499 4923 generic.go:334] "Generic (PLEG): container finished" podID="9fca30b0-b933-4526-9006-e477a86836a6" containerID="f3490ef1976735489e353012e64ae07dfbc4a1a1bc13fce7e22fade279ae7ad1" exitCode=0 Nov 28 11:27:38 crc kubenswrapper[4923]: I1128 11:27:38.944570 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-qfdbm" event={"ID":"9fca30b0-b933-4526-9006-e477a86836a6","Type":"ContainerDied","Data":"f3490ef1976735489e353012e64ae07dfbc4a1a1bc13fce7e22fade279ae7ad1"} Nov 28 11:27:39 crc kubenswrapper[4923]: I1128 11:27:39.701235 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-54f9b7b8d9-d9ssw" Nov 28 11:27:39 crc kubenswrapper[4923]: I1128 11:27:39.795098 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-ms5mm"] Nov 28 11:27:39 crc kubenswrapper[4923]: I1128 11:27:39.795431 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-86db49b7ff-ms5mm" 
podUID="4b2dbc83-52f0-425a-955f-795d78314254" containerName="dnsmasq-dns" containerID="cri-o://e63afc1c95653740ecf9248d90d0fc1bc2e2f6dc858f6f30b7d0d888e9ff99b1" gracePeriod=10 Nov 28 11:27:39 crc kubenswrapper[4923]: I1128 11:27:39.968102 4923 generic.go:334] "Generic (PLEG): container finished" podID="4b2dbc83-52f0-425a-955f-795d78314254" containerID="e63afc1c95653740ecf9248d90d0fc1bc2e2f6dc858f6f30b7d0d888e9ff99b1" exitCode=0 Nov 28 11:27:39 crc kubenswrapper[4923]: I1128 11:27:39.968275 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-ms5mm" event={"ID":"4b2dbc83-52f0-425a-955f-795d78314254","Type":"ContainerDied","Data":"e63afc1c95653740ecf9248d90d0fc1bc2e2f6dc858f6f30b7d0d888e9ff99b1"} Nov 28 11:27:40 crc kubenswrapper[4923]: I1128 11:27:40.237914 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-ms5mm" Nov 28 11:27:40 crc kubenswrapper[4923]: I1128 11:27:40.341832 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-qfdbm" Nov 28 11:27:40 crc kubenswrapper[4923]: I1128 11:27:40.399294 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4b2dbc83-52f0-425a-955f-795d78314254-ovsdbserver-nb\") pod \"4b2dbc83-52f0-425a-955f-795d78314254\" (UID: \"4b2dbc83-52f0-425a-955f-795d78314254\") " Nov 28 11:27:40 crc kubenswrapper[4923]: I1128 11:27:40.399408 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jv7pt\" (UniqueName: \"kubernetes.io/projected/4b2dbc83-52f0-425a-955f-795d78314254-kube-api-access-jv7pt\") pod \"4b2dbc83-52f0-425a-955f-795d78314254\" (UID: \"4b2dbc83-52f0-425a-955f-795d78314254\") " Nov 28 11:27:40 crc kubenswrapper[4923]: I1128 11:27:40.399467 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4b2dbc83-52f0-425a-955f-795d78314254-config\") pod \"4b2dbc83-52f0-425a-955f-795d78314254\" (UID: \"4b2dbc83-52f0-425a-955f-795d78314254\") " Nov 28 11:27:40 crc kubenswrapper[4923]: I1128 11:27:40.399531 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4b2dbc83-52f0-425a-955f-795d78314254-ovsdbserver-sb\") pod \"4b2dbc83-52f0-425a-955f-795d78314254\" (UID: \"4b2dbc83-52f0-425a-955f-795d78314254\") " Nov 28 11:27:40 crc kubenswrapper[4923]: I1128 11:27:40.399560 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4b2dbc83-52f0-425a-955f-795d78314254-dns-svc\") pod \"4b2dbc83-52f0-425a-955f-795d78314254\" (UID: \"4b2dbc83-52f0-425a-955f-795d78314254\") " Nov 28 11:27:40 crc kubenswrapper[4923]: I1128 11:27:40.404796 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4b2dbc83-52f0-425a-955f-795d78314254-kube-api-access-jv7pt" (OuterVolumeSpecName: "kube-api-access-jv7pt") pod "4b2dbc83-52f0-425a-955f-795d78314254" (UID: "4b2dbc83-52f0-425a-955f-795d78314254"). InnerVolumeSpecName "kube-api-access-jv7pt". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:27:40 crc kubenswrapper[4923]: I1128 11:27:40.445012 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4b2dbc83-52f0-425a-955f-795d78314254-config" (OuterVolumeSpecName: "config") pod "4b2dbc83-52f0-425a-955f-795d78314254" (UID: "4b2dbc83-52f0-425a-955f-795d78314254"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:27:40 crc kubenswrapper[4923]: I1128 11:27:40.446338 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4b2dbc83-52f0-425a-955f-795d78314254-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "4b2dbc83-52f0-425a-955f-795d78314254" (UID: "4b2dbc83-52f0-425a-955f-795d78314254"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:27:40 crc kubenswrapper[4923]: I1128 11:27:40.453281 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4b2dbc83-52f0-425a-955f-795d78314254-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "4b2dbc83-52f0-425a-955f-795d78314254" (UID: "4b2dbc83-52f0-425a-955f-795d78314254"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:27:40 crc kubenswrapper[4923]: I1128 11:27:40.454038 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4b2dbc83-52f0-425a-955f-795d78314254-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "4b2dbc83-52f0-425a-955f-795d78314254" (UID: "4b2dbc83-52f0-425a-955f-795d78314254"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:27:40 crc kubenswrapper[4923]: I1128 11:27:40.501684 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9fca30b0-b933-4526-9006-e477a86836a6-config-data\") pod \"9fca30b0-b933-4526-9006-e477a86836a6\" (UID: \"9fca30b0-b933-4526-9006-e477a86836a6\") " Nov 28 11:27:40 crc kubenswrapper[4923]: I1128 11:27:40.501900 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g8x59\" (UniqueName: \"kubernetes.io/projected/9fca30b0-b933-4526-9006-e477a86836a6-kube-api-access-g8x59\") pod \"9fca30b0-b933-4526-9006-e477a86836a6\" (UID: \"9fca30b0-b933-4526-9006-e477a86836a6\") " Nov 28 11:27:40 crc kubenswrapper[4923]: I1128 11:27:40.502376 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9fca30b0-b933-4526-9006-e477a86836a6-combined-ca-bundle\") pod \"9fca30b0-b933-4526-9006-e477a86836a6\" (UID: \"9fca30b0-b933-4526-9006-e477a86836a6\") " Nov 28 11:27:40 crc kubenswrapper[4923]: I1128 11:27:40.502787 4923 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4b2dbc83-52f0-425a-955f-795d78314254-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 11:27:40 crc kubenswrapper[4923]: I1128 11:27:40.502851 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jv7pt\" (UniqueName: \"kubernetes.io/projected/4b2dbc83-52f0-425a-955f-795d78314254-kube-api-access-jv7pt\") on node \"crc\" DevicePath \"\"" Nov 28 11:27:40 crc kubenswrapper[4923]: I1128 11:27:40.502909 4923 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/4b2dbc83-52f0-425a-955f-795d78314254-config\") on node \"crc\" DevicePath \"\"" Nov 28 11:27:40 crc kubenswrapper[4923]: I1128 11:27:40.502978 4923 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4b2dbc83-52f0-425a-955f-795d78314254-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 11:27:40 crc kubenswrapper[4923]: I1128 11:27:40.503043 4923 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4b2dbc83-52f0-425a-955f-795d78314254-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 11:27:40 crc kubenswrapper[4923]: I1128 11:27:40.505135 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9fca30b0-b933-4526-9006-e477a86836a6-kube-api-access-g8x59" (OuterVolumeSpecName: "kube-api-access-g8x59") pod "9fca30b0-b933-4526-9006-e477a86836a6" (UID: "9fca30b0-b933-4526-9006-e477a86836a6"). InnerVolumeSpecName "kube-api-access-g8x59". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:27:40 crc kubenswrapper[4923]: I1128 11:27:40.521614 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9fca30b0-b933-4526-9006-e477a86836a6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9fca30b0-b933-4526-9006-e477a86836a6" (UID: "9fca30b0-b933-4526-9006-e477a86836a6"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:27:40 crc kubenswrapper[4923]: I1128 11:27:40.542942 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9fca30b0-b933-4526-9006-e477a86836a6-config-data" (OuterVolumeSpecName: "config-data") pod "9fca30b0-b933-4526-9006-e477a86836a6" (UID: "9fca30b0-b933-4526-9006-e477a86836a6"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:27:40 crc kubenswrapper[4923]: I1128 11:27:40.604873 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g8x59\" (UniqueName: \"kubernetes.io/projected/9fca30b0-b933-4526-9006-e477a86836a6-kube-api-access-g8x59\") on node \"crc\" DevicePath \"\"" Nov 28 11:27:40 crc kubenswrapper[4923]: I1128 11:27:40.604902 4923 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9fca30b0-b933-4526-9006-e477a86836a6-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 11:27:40 crc kubenswrapper[4923]: I1128 11:27:40.604914 4923 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9fca30b0-b933-4526-9006-e477a86836a6-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 11:27:40 crc kubenswrapper[4923]: I1128 11:27:40.981457 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-ms5mm" event={"ID":"4b2dbc83-52f0-425a-955f-795d78314254","Type":"ContainerDied","Data":"340e3aaa369354bb03761b1e46c7892809e79a31c25b9939cd05a236dfffc9d8"} Nov 28 11:27:40 crc kubenswrapper[4923]: I1128 11:27:40.981506 4923 scope.go:117] "RemoveContainer" containerID="e63afc1c95653740ecf9248d90d0fc1bc2e2f6dc858f6f30b7d0d888e9ff99b1" Nov 28 11:27:40 crc kubenswrapper[4923]: I1128 11:27:40.981613 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-ms5mm" Nov 28 11:27:40 crc kubenswrapper[4923]: I1128 11:27:40.986969 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-qfdbm" event={"ID":"9fca30b0-b933-4526-9006-e477a86836a6","Type":"ContainerDied","Data":"e9a5d3cec66ae4383b6688e462e004dd7851d64de579801690b4db2a89fe0d12"} Nov 28 11:27:40 crc kubenswrapper[4923]: I1128 11:27:40.987007 4923 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e9a5d3cec66ae4383b6688e462e004dd7851d64de579801690b4db2a89fe0d12" Nov 28 11:27:40 crc kubenswrapper[4923]: I1128 11:27:40.987062 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-qfdbm" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.012346 4923 scope.go:117] "RemoveContainer" containerID="8e710f915bff9b0cdfdca645aba662df3e60674b9055af08cf04188a2f87d620" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.056040 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-ms5mm"] Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.061876 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-ms5mm"] Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.177466 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4b2dbc83-52f0-425a-955f-795d78314254" path="/var/lib/kubelet/pods/4b2dbc83-52f0-425a-955f-795d78314254/volumes" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.266815 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-8v4gc"] Nov 28 11:27:41 crc kubenswrapper[4923]: E1128 11:27:41.267549 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bc24e28a-d913-47b0-a352-9962063ffedf" containerName="mariadb-database-create" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.267611 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="bc24e28a-d913-47b0-a352-9962063ffedf" containerName="mariadb-database-create" Nov 28 11:27:41 crc kubenswrapper[4923]: E1128 11:27:41.267691 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b2dbc83-52f0-425a-955f-795d78314254" containerName="init" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.267752 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b2dbc83-52f0-425a-955f-795d78314254" containerName="init" Nov 28 11:27:41 crc kubenswrapper[4923]: E1128 11:27:41.267826 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="06f81fa3-fcd1-4f4d-9dbe-fe659371c477" containerName="mariadb-account-create-update" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.267881 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="06f81fa3-fcd1-4f4d-9dbe-fe659371c477" containerName="mariadb-account-create-update" Nov 28 11:27:41 crc kubenswrapper[4923]: E1128 11:27:41.267959 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="856d0fb6-7167-4553-b626-aaa75d43f5ab" containerName="mariadb-database-create" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.268011 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="856d0fb6-7167-4553-b626-aaa75d43f5ab" containerName="mariadb-database-create" Nov 28 11:27:41 crc kubenswrapper[4923]: E1128 11:27:41.268064 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fa707c0f-6bdb-4597-b001-d457323f04c1" containerName="mariadb-database-create" Nov 28 11:27:41 crc 
kubenswrapper[4923]: I1128 11:27:41.268113 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="fa707c0f-6bdb-4597-b001-d457323f04c1" containerName="mariadb-database-create" Nov 28 11:27:41 crc kubenswrapper[4923]: E1128 11:27:41.268172 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="68243bc7-2ab3-4632-9ff1-d3af61a0acb3" containerName="mariadb-account-create-update" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.268226 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="68243bc7-2ab3-4632-9ff1-d3af61a0acb3" containerName="mariadb-account-create-update" Nov 28 11:27:41 crc kubenswrapper[4923]: E1128 11:27:41.268276 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9fca30b0-b933-4526-9006-e477a86836a6" containerName="keystone-db-sync" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.268328 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="9fca30b0-b933-4526-9006-e477a86836a6" containerName="keystone-db-sync" Nov 28 11:27:41 crc kubenswrapper[4923]: E1128 11:27:41.268385 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5a5b5ef2-d0c3-4cee-ae31-3a4d74171b68" containerName="mariadb-account-create-update" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.268437 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="5a5b5ef2-d0c3-4cee-ae31-3a4d74171b68" containerName="mariadb-account-create-update" Nov 28 11:27:41 crc kubenswrapper[4923]: E1128 11:27:41.268497 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b2dbc83-52f0-425a-955f-795d78314254" containerName="dnsmasq-dns" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.268551 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b2dbc83-52f0-425a-955f-795d78314254" containerName="dnsmasq-dns" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.271991 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="bc24e28a-d913-47b0-a352-9962063ffedf" containerName="mariadb-database-create" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.272127 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="06f81fa3-fcd1-4f4d-9dbe-fe659371c477" containerName="mariadb-account-create-update" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.272190 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="68243bc7-2ab3-4632-9ff1-d3af61a0acb3" containerName="mariadb-account-create-update" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.272249 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="fa707c0f-6bdb-4597-b001-d457323f04c1" containerName="mariadb-database-create" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.272307 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="9fca30b0-b933-4526-9006-e477a86836a6" containerName="keystone-db-sync" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.272371 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="4b2dbc83-52f0-425a-955f-795d78314254" containerName="dnsmasq-dns" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.272431 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="5a5b5ef2-d0c3-4cee-ae31-3a4d74171b68" containerName="mariadb-account-create-update" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.272488 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="856d0fb6-7167-4553-b626-aaa75d43f5ab" containerName="mariadb-database-create" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 
11:27:41.273109 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-8v4gc" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.280052 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.280260 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.280419 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.280519 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-4f87p" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.284528 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.290515 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6546db6db7-9twqd"] Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.292303 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6546db6db7-9twqd" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.300298 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-8v4gc"] Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.332070 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6546db6db7-9twqd"] Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.418389 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c462cd46-ea0b-4567-a592-d43b436767e5-combined-ca-bundle\") pod \"keystone-bootstrap-8v4gc\" (UID: \"c462cd46-ea0b-4567-a592-d43b436767e5\") " pod="openstack/keystone-bootstrap-8v4gc" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.418666 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a7c26baf-9942-4ffd-bfd4-b22b4a631307-config\") pod \"dnsmasq-dns-6546db6db7-9twqd\" (UID: \"a7c26baf-9942-4ffd-bfd4-b22b4a631307\") " pod="openstack/dnsmasq-dns-6546db6db7-9twqd" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.418754 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/c462cd46-ea0b-4567-a592-d43b436767e5-fernet-keys\") pod \"keystone-bootstrap-8v4gc\" (UID: \"c462cd46-ea0b-4567-a592-d43b436767e5\") " pod="openstack/keystone-bootstrap-8v4gc" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.418836 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c462cd46-ea0b-4567-a592-d43b436767e5-scripts\") pod \"keystone-bootstrap-8v4gc\" (UID: \"c462cd46-ea0b-4567-a592-d43b436767e5\") " pod="openstack/keystone-bootstrap-8v4gc" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.418918 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/c462cd46-ea0b-4567-a592-d43b436767e5-credential-keys\") pod \"keystone-bootstrap-8v4gc\" (UID: \"c462cd46-ea0b-4567-a592-d43b436767e5\") " 
pod="openstack/keystone-bootstrap-8v4gc" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.419228 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c462cd46-ea0b-4567-a592-d43b436767e5-config-data\") pod \"keystone-bootstrap-8v4gc\" (UID: \"c462cd46-ea0b-4567-a592-d43b436767e5\") " pod="openstack/keystone-bootstrap-8v4gc" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.419293 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a7c26baf-9942-4ffd-bfd4-b22b4a631307-dns-svc\") pod \"dnsmasq-dns-6546db6db7-9twqd\" (UID: \"a7c26baf-9942-4ffd-bfd4-b22b4a631307\") " pod="openstack/dnsmasq-dns-6546db6db7-9twqd" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.419445 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7bffr\" (UniqueName: \"kubernetes.io/projected/c462cd46-ea0b-4567-a592-d43b436767e5-kube-api-access-7bffr\") pod \"keystone-bootstrap-8v4gc\" (UID: \"c462cd46-ea0b-4567-a592-d43b436767e5\") " pod="openstack/keystone-bootstrap-8v4gc" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.419478 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-87dm4\" (UniqueName: \"kubernetes.io/projected/a7c26baf-9942-4ffd-bfd4-b22b4a631307-kube-api-access-87dm4\") pod \"dnsmasq-dns-6546db6db7-9twqd\" (UID: \"a7c26baf-9942-4ffd-bfd4-b22b4a631307\") " pod="openstack/dnsmasq-dns-6546db6db7-9twqd" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.419551 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a7c26baf-9942-4ffd-bfd4-b22b4a631307-ovsdbserver-nb\") pod \"dnsmasq-dns-6546db6db7-9twqd\" (UID: \"a7c26baf-9942-4ffd-bfd4-b22b4a631307\") " pod="openstack/dnsmasq-dns-6546db6db7-9twqd" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.419674 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a7c26baf-9942-4ffd-bfd4-b22b4a631307-ovsdbserver-sb\") pod \"dnsmasq-dns-6546db6db7-9twqd\" (UID: \"a7c26baf-9942-4ffd-bfd4-b22b4a631307\") " pod="openstack/dnsmasq-dns-6546db6db7-9twqd" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.467571 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-zvrbl"] Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.475856 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-zvrbl" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.477946 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-275ps" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.478334 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.482683 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.498060 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-zvrbl"] Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.520744 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c462cd46-ea0b-4567-a592-d43b436767e5-config-data\") pod \"keystone-bootstrap-8v4gc\" (UID: \"c462cd46-ea0b-4567-a592-d43b436767e5\") " pod="openstack/keystone-bootstrap-8v4gc" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.520921 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a7c26baf-9942-4ffd-bfd4-b22b4a631307-dns-svc\") pod \"dnsmasq-dns-6546db6db7-9twqd\" (UID: \"a7c26baf-9942-4ffd-bfd4-b22b4a631307\") " pod="openstack/dnsmasq-dns-6546db6db7-9twqd" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.521035 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7bffr\" (UniqueName: \"kubernetes.io/projected/c462cd46-ea0b-4567-a592-d43b436767e5-kube-api-access-7bffr\") pod \"keystone-bootstrap-8v4gc\" (UID: \"c462cd46-ea0b-4567-a592-d43b436767e5\") " pod="openstack/keystone-bootstrap-8v4gc" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.521141 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-87dm4\" (UniqueName: \"kubernetes.io/projected/a7c26baf-9942-4ffd-bfd4-b22b4a631307-kube-api-access-87dm4\") pod \"dnsmasq-dns-6546db6db7-9twqd\" (UID: \"a7c26baf-9942-4ffd-bfd4-b22b4a631307\") " pod="openstack/dnsmasq-dns-6546db6db7-9twqd" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.521223 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a7c26baf-9942-4ffd-bfd4-b22b4a631307-ovsdbserver-nb\") pod \"dnsmasq-dns-6546db6db7-9twqd\" (UID: \"a7c26baf-9942-4ffd-bfd4-b22b4a631307\") " pod="openstack/dnsmasq-dns-6546db6db7-9twqd" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.521309 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a7c26baf-9942-4ffd-bfd4-b22b4a631307-ovsdbserver-sb\") pod \"dnsmasq-dns-6546db6db7-9twqd\" (UID: \"a7c26baf-9942-4ffd-bfd4-b22b4a631307\") " pod="openstack/dnsmasq-dns-6546db6db7-9twqd" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.521403 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c462cd46-ea0b-4567-a592-d43b436767e5-combined-ca-bundle\") pod \"keystone-bootstrap-8v4gc\" (UID: \"c462cd46-ea0b-4567-a592-d43b436767e5\") " pod="openstack/keystone-bootstrap-8v4gc" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.521475 4923 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a7c26baf-9942-4ffd-bfd4-b22b4a631307-config\") pod \"dnsmasq-dns-6546db6db7-9twqd\" (UID: \"a7c26baf-9942-4ffd-bfd4-b22b4a631307\") " pod="openstack/dnsmasq-dns-6546db6db7-9twqd" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.522056 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/c462cd46-ea0b-4567-a592-d43b436767e5-fernet-keys\") pod \"keystone-bootstrap-8v4gc\" (UID: \"c462cd46-ea0b-4567-a592-d43b436767e5\") " pod="openstack/keystone-bootstrap-8v4gc" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.522173 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c462cd46-ea0b-4567-a592-d43b436767e5-scripts\") pod \"keystone-bootstrap-8v4gc\" (UID: \"c462cd46-ea0b-4567-a592-d43b436767e5\") " pod="openstack/keystone-bootstrap-8v4gc" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.522295 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/c462cd46-ea0b-4567-a592-d43b436767e5-credential-keys\") pod \"keystone-bootstrap-8v4gc\" (UID: \"c462cd46-ea0b-4567-a592-d43b436767e5\") " pod="openstack/keystone-bootstrap-8v4gc" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.522419 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a7c26baf-9942-4ffd-bfd4-b22b4a631307-config\") pod \"dnsmasq-dns-6546db6db7-9twqd\" (UID: \"a7c26baf-9942-4ffd-bfd4-b22b4a631307\") " pod="openstack/dnsmasq-dns-6546db6db7-9twqd" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.526731 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a7c26baf-9942-4ffd-bfd4-b22b4a631307-ovsdbserver-sb\") pod \"dnsmasq-dns-6546db6db7-9twqd\" (UID: \"a7c26baf-9942-4ffd-bfd4-b22b4a631307\") " pod="openstack/dnsmasq-dns-6546db6db7-9twqd" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.526848 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a7c26baf-9942-4ffd-bfd4-b22b4a631307-dns-svc\") pod \"dnsmasq-dns-6546db6db7-9twqd\" (UID: \"a7c26baf-9942-4ffd-bfd4-b22b4a631307\") " pod="openstack/dnsmasq-dns-6546db6db7-9twqd" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.527388 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a7c26baf-9942-4ffd-bfd4-b22b4a631307-ovsdbserver-nb\") pod \"dnsmasq-dns-6546db6db7-9twqd\" (UID: \"a7c26baf-9942-4ffd-bfd4-b22b4a631307\") " pod="openstack/dnsmasq-dns-6546db6db7-9twqd" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.530486 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/c462cd46-ea0b-4567-a592-d43b436767e5-credential-keys\") pod \"keystone-bootstrap-8v4gc\" (UID: \"c462cd46-ea0b-4567-a592-d43b436767e5\") " pod="openstack/keystone-bootstrap-8v4gc" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.534710 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c462cd46-ea0b-4567-a592-d43b436767e5-config-data\") pod \"keystone-bootstrap-8v4gc\" (UID: \"c462cd46-ea0b-4567-a592-d43b436767e5\") " 
pod="openstack/keystone-bootstrap-8v4gc" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.537564 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c462cd46-ea0b-4567-a592-d43b436767e5-combined-ca-bundle\") pod \"keystone-bootstrap-8v4gc\" (UID: \"c462cd46-ea0b-4567-a592-d43b436767e5\") " pod="openstack/keystone-bootstrap-8v4gc" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.537773 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c462cd46-ea0b-4567-a592-d43b436767e5-scripts\") pod \"keystone-bootstrap-8v4gc\" (UID: \"c462cd46-ea0b-4567-a592-d43b436767e5\") " pod="openstack/keystone-bootstrap-8v4gc" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.553115 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/c462cd46-ea0b-4567-a592-d43b436767e5-fernet-keys\") pod \"keystone-bootstrap-8v4gc\" (UID: \"c462cd46-ea0b-4567-a592-d43b436767e5\") " pod="openstack/keystone-bootstrap-8v4gc" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.555422 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-87dm4\" (UniqueName: \"kubernetes.io/projected/a7c26baf-9942-4ffd-bfd4-b22b4a631307-kube-api-access-87dm4\") pod \"dnsmasq-dns-6546db6db7-9twqd\" (UID: \"a7c26baf-9942-4ffd-bfd4-b22b4a631307\") " pod="openstack/dnsmasq-dns-6546db6db7-9twqd" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.569272 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7bffr\" (UniqueName: \"kubernetes.io/projected/c462cd46-ea0b-4567-a592-d43b436767e5-kube-api-access-7bffr\") pod \"keystone-bootstrap-8v4gc\" (UID: \"c462cd46-ea0b-4567-a592-d43b436767e5\") " pod="openstack/keystone-bootstrap-8v4gc" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.589908 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-s6twc"] Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.590943 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-s6twc" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.592022 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-8v4gc" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.603472 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-d7bh7" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.603974 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.605961 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.608396 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-s6twc"] Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.623650 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6546db6db7-9twqd" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.637286 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/ab7a5c22-b1d8-49e8-9420-25485e5dabd7-config\") pod \"neutron-db-sync-zvrbl\" (UID: \"ab7a5c22-b1d8-49e8-9420-25485e5dabd7\") " pod="openstack/neutron-db-sync-zvrbl" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.637481 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cbrsc\" (UniqueName: \"kubernetes.io/projected/ab7a5c22-b1d8-49e8-9420-25485e5dabd7-kube-api-access-cbrsc\") pod \"neutron-db-sync-zvrbl\" (UID: \"ab7a5c22-b1d8-49e8-9420-25485e5dabd7\") " pod="openstack/neutron-db-sync-zvrbl" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.637600 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab7a5c22-b1d8-49e8-9420-25485e5dabd7-combined-ca-bundle\") pod \"neutron-db-sync-zvrbl\" (UID: \"ab7a5c22-b1d8-49e8-9420-25485e5dabd7\") " pod="openstack/neutron-db-sync-zvrbl" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.752107 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4c7bc447-b1f7-4e68-b0da-310515aecea9-scripts\") pod \"cinder-db-sync-s6twc\" (UID: \"4c7bc447-b1f7-4e68-b0da-310515aecea9\") " pod="openstack/cinder-db-sync-s6twc" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.752234 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab7a5c22-b1d8-49e8-9420-25485e5dabd7-combined-ca-bundle\") pod \"neutron-db-sync-zvrbl\" (UID: \"ab7a5c22-b1d8-49e8-9420-25485e5dabd7\") " pod="openstack/neutron-db-sync-zvrbl" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.752321 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4c7bc447-b1f7-4e68-b0da-310515aecea9-config-data\") pod \"cinder-db-sync-s6twc\" (UID: \"4c7bc447-b1f7-4e68-b0da-310515aecea9\") " pod="openstack/cinder-db-sync-s6twc" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.752395 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c7bc447-b1f7-4e68-b0da-310515aecea9-combined-ca-bundle\") pod \"cinder-db-sync-s6twc\" (UID: \"4c7bc447-b1f7-4e68-b0da-310515aecea9\") " pod="openstack/cinder-db-sync-s6twc" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.752480 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/4c7bc447-b1f7-4e68-b0da-310515aecea9-db-sync-config-data\") pod \"cinder-db-sync-s6twc\" (UID: \"4c7bc447-b1f7-4e68-b0da-310515aecea9\") " pod="openstack/cinder-db-sync-s6twc" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.752580 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qldm2\" (UniqueName: \"kubernetes.io/projected/4c7bc447-b1f7-4e68-b0da-310515aecea9-kube-api-access-qldm2\") pod \"cinder-db-sync-s6twc\" (UID: \"4c7bc447-b1f7-4e68-b0da-310515aecea9\") " 
pod="openstack/cinder-db-sync-s6twc" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.752655 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/ab7a5c22-b1d8-49e8-9420-25485e5dabd7-config\") pod \"neutron-db-sync-zvrbl\" (UID: \"ab7a5c22-b1d8-49e8-9420-25485e5dabd7\") " pod="openstack/neutron-db-sync-zvrbl" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.752729 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cbrsc\" (UniqueName: \"kubernetes.io/projected/ab7a5c22-b1d8-49e8-9420-25485e5dabd7-kube-api-access-cbrsc\") pod \"neutron-db-sync-zvrbl\" (UID: \"ab7a5c22-b1d8-49e8-9420-25485e5dabd7\") " pod="openstack/neutron-db-sync-zvrbl" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.753520 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/4c7bc447-b1f7-4e68-b0da-310515aecea9-etc-machine-id\") pod \"cinder-db-sync-s6twc\" (UID: \"4c7bc447-b1f7-4e68-b0da-310515aecea9\") " pod="openstack/cinder-db-sync-s6twc" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.762102 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-g6z8k"] Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.778404 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/ab7a5c22-b1d8-49e8-9420-25485e5dabd7-config\") pod \"neutron-db-sync-zvrbl\" (UID: \"ab7a5c22-b1d8-49e8-9420-25485e5dabd7\") " pod="openstack/neutron-db-sync-zvrbl" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.779421 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-g6z8k"] Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.779570 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-g6z8k" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.796340 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab7a5c22-b1d8-49e8-9420-25485e5dabd7-combined-ca-bundle\") pod \"neutron-db-sync-zvrbl\" (UID: \"ab7a5c22-b1d8-49e8-9420-25485e5dabd7\") " pod="openstack/neutron-db-sync-zvrbl" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.796458 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-b867g"] Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.796653 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-q25nv" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.796904 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.799164 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-b867g" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.799473 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.804549 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6546db6db7-9twqd"] Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.805016 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.809940 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-bgjhb" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.830974 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-b867g"] Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.830980 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cbrsc\" (UniqueName: \"kubernetes.io/projected/ab7a5c22-b1d8-49e8-9420-25485e5dabd7-kube-api-access-cbrsc\") pod \"neutron-db-sync-zvrbl\" (UID: \"ab7a5c22-b1d8-49e8-9420-25485e5dabd7\") " pod="openstack/neutron-db-sync-zvrbl" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.831004 4923 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/openstack-cell1-galera-0" podUID="1bddc188-1e43-4efd-9228-ac466ce69994" containerName="galera" probeResult="failure" output="command timed out" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.854661 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qldm2\" (UniqueName: \"kubernetes.io/projected/4c7bc447-b1f7-4e68-b0da-310515aecea9-kube-api-access-qldm2\") pod \"cinder-db-sync-s6twc\" (UID: \"4c7bc447-b1f7-4e68-b0da-310515aecea9\") " pod="openstack/cinder-db-sync-s6twc" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.854898 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/4c7bc447-b1f7-4e68-b0da-310515aecea9-etc-machine-id\") pod \"cinder-db-sync-s6twc\" (UID: \"4c7bc447-b1f7-4e68-b0da-310515aecea9\") " pod="openstack/cinder-db-sync-s6twc" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.855033 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6803cf35-bd54-4b83-a5b1-42cae252f98d-logs\") pod \"placement-db-sync-g6z8k\" (UID: \"6803cf35-bd54-4b83-a5b1-42cae252f98d\") " pod="openstack/placement-db-sync-g6z8k" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.855122 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4c7bc447-b1f7-4e68-b0da-310515aecea9-scripts\") pod \"cinder-db-sync-s6twc\" (UID: \"4c7bc447-b1f7-4e68-b0da-310515aecea9\") " pod="openstack/cinder-db-sync-s6twc" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.855199 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1813822f-07d2-4a68-98bf-26cf5edd6707-combined-ca-bundle\") pod \"barbican-db-sync-b867g\" (UID: \"1813822f-07d2-4a68-98bf-26cf5edd6707\") " pod="openstack/barbican-db-sync-b867g" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.855275 4923 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/1813822f-07d2-4a68-98bf-26cf5edd6707-db-sync-config-data\") pod \"barbican-db-sync-b867g\" (UID: \"1813822f-07d2-4a68-98bf-26cf5edd6707\") " pod="openstack/barbican-db-sync-b867g" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.855356 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6803cf35-bd54-4b83-a5b1-42cae252f98d-scripts\") pod \"placement-db-sync-g6z8k\" (UID: \"6803cf35-bd54-4b83-a5b1-42cae252f98d\") " pod="openstack/placement-db-sync-g6z8k" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.855445 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4c7bc447-b1f7-4e68-b0da-310515aecea9-config-data\") pod \"cinder-db-sync-s6twc\" (UID: \"4c7bc447-b1f7-4e68-b0da-310515aecea9\") " pod="openstack/cinder-db-sync-s6twc" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.855519 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6803cf35-bd54-4b83-a5b1-42cae252f98d-combined-ca-bundle\") pod \"placement-db-sync-g6z8k\" (UID: \"6803cf35-bd54-4b83-a5b1-42cae252f98d\") " pod="openstack/placement-db-sync-g6z8k" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.855582 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c7bc447-b1f7-4e68-b0da-310515aecea9-combined-ca-bundle\") pod \"cinder-db-sync-s6twc\" (UID: \"4c7bc447-b1f7-4e68-b0da-310515aecea9\") " pod="openstack/cinder-db-sync-s6twc" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.855658 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6803cf35-bd54-4b83-a5b1-42cae252f98d-config-data\") pod \"placement-db-sync-g6z8k\" (UID: \"6803cf35-bd54-4b83-a5b1-42cae252f98d\") " pod="openstack/placement-db-sync-g6z8k" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.855720 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ktjcf\" (UniqueName: \"kubernetes.io/projected/6803cf35-bd54-4b83-a5b1-42cae252f98d-kube-api-access-ktjcf\") pod \"placement-db-sync-g6z8k\" (UID: \"6803cf35-bd54-4b83-a5b1-42cae252f98d\") " pod="openstack/placement-db-sync-g6z8k" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.855878 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/4c7bc447-b1f7-4e68-b0da-310515aecea9-db-sync-config-data\") pod \"cinder-db-sync-s6twc\" (UID: \"4c7bc447-b1f7-4e68-b0da-310515aecea9\") " pod="openstack/cinder-db-sync-s6twc" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.855970 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xr57z\" (UniqueName: \"kubernetes.io/projected/1813822f-07d2-4a68-98bf-26cf5edd6707-kube-api-access-xr57z\") pod \"barbican-db-sync-b867g\" (UID: \"1813822f-07d2-4a68-98bf-26cf5edd6707\") " pod="openstack/barbican-db-sync-b867g" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.856344 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/4c7bc447-b1f7-4e68-b0da-310515aecea9-etc-machine-id\") pod \"cinder-db-sync-s6twc\" (UID: \"4c7bc447-b1f7-4e68-b0da-310515aecea9\") " pod="openstack/cinder-db-sync-s6twc" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.869265 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c7bc447-b1f7-4e68-b0da-310515aecea9-combined-ca-bundle\") pod \"cinder-db-sync-s6twc\" (UID: \"4c7bc447-b1f7-4e68-b0da-310515aecea9\") " pod="openstack/cinder-db-sync-s6twc" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.869584 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4c7bc447-b1f7-4e68-b0da-310515aecea9-config-data\") pod \"cinder-db-sync-s6twc\" (UID: \"4c7bc447-b1f7-4e68-b0da-310515aecea9\") " pod="openstack/cinder-db-sync-s6twc" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.870447 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/4c7bc447-b1f7-4e68-b0da-310515aecea9-db-sync-config-data\") pod \"cinder-db-sync-s6twc\" (UID: \"4c7bc447-b1f7-4e68-b0da-310515aecea9\") " pod="openstack/cinder-db-sync-s6twc" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.879246 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7987f74bbc-9zsf6"] Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.880506 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7987f74bbc-9zsf6" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.884137 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qldm2\" (UniqueName: \"kubernetes.io/projected/4c7bc447-b1f7-4e68-b0da-310515aecea9-kube-api-access-qldm2\") pod \"cinder-db-sync-s6twc\" (UID: \"4c7bc447-b1f7-4e68-b0da-310515aecea9\") " pod="openstack/cinder-db-sync-s6twc" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.887442 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4c7bc447-b1f7-4e68-b0da-310515aecea9-scripts\") pod \"cinder-db-sync-s6twc\" (UID: \"4c7bc447-b1f7-4e68-b0da-310515aecea9\") " pod="openstack/cinder-db-sync-s6twc" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.956292 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7987f74bbc-9zsf6"] Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.967078 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.968867 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.970314 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6803cf35-bd54-4b83-a5b1-42cae252f98d-logs\") pod \"placement-db-sync-g6z8k\" (UID: \"6803cf35-bd54-4b83-a5b1-42cae252f98d\") " pod="openstack/placement-db-sync-g6z8k" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.970379 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b1a828ce-0622-4f74-937b-4341f9795501-ovsdbserver-nb\") pod \"dnsmasq-dns-7987f74bbc-9zsf6\" (UID: \"b1a828ce-0622-4f74-937b-4341f9795501\") " pod="openstack/dnsmasq-dns-7987f74bbc-9zsf6" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.970404 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s2n5s\" (UniqueName: \"kubernetes.io/projected/b1a828ce-0622-4f74-937b-4341f9795501-kube-api-access-s2n5s\") pod \"dnsmasq-dns-7987f74bbc-9zsf6\" (UID: \"b1a828ce-0622-4f74-937b-4341f9795501\") " pod="openstack/dnsmasq-dns-7987f74bbc-9zsf6" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.970436 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1813822f-07d2-4a68-98bf-26cf5edd6707-combined-ca-bundle\") pod \"barbican-db-sync-b867g\" (UID: \"1813822f-07d2-4a68-98bf-26cf5edd6707\") " pod="openstack/barbican-db-sync-b867g" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.970470 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/1813822f-07d2-4a68-98bf-26cf5edd6707-db-sync-config-data\") pod \"barbican-db-sync-b867g\" (UID: \"1813822f-07d2-4a68-98bf-26cf5edd6707\") " pod="openstack/barbican-db-sync-b867g" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.970515 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b1a828ce-0622-4f74-937b-4341f9795501-dns-svc\") pod \"dnsmasq-dns-7987f74bbc-9zsf6\" (UID: \"b1a828ce-0622-4f74-937b-4341f9795501\") " pod="openstack/dnsmasq-dns-7987f74bbc-9zsf6" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.970538 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6803cf35-bd54-4b83-a5b1-42cae252f98d-scripts\") pod \"placement-db-sync-g6z8k\" (UID: \"6803cf35-bd54-4b83-a5b1-42cae252f98d\") " pod="openstack/placement-db-sync-g6z8k" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.970552 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b1a828ce-0622-4f74-937b-4341f9795501-config\") pod \"dnsmasq-dns-7987f74bbc-9zsf6\" (UID: \"b1a828ce-0622-4f74-937b-4341f9795501\") " pod="openstack/dnsmasq-dns-7987f74bbc-9zsf6" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.970609 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6803cf35-bd54-4b83-a5b1-42cae252f98d-combined-ca-bundle\") pod \"placement-db-sync-g6z8k\" (UID: \"6803cf35-bd54-4b83-a5b1-42cae252f98d\") " pod="openstack/placement-db-sync-g6z8k" Nov 28 
11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.970628 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b1a828ce-0622-4f74-937b-4341f9795501-ovsdbserver-sb\") pod \"dnsmasq-dns-7987f74bbc-9zsf6\" (UID: \"b1a828ce-0622-4f74-937b-4341f9795501\") " pod="openstack/dnsmasq-dns-7987f74bbc-9zsf6" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.970664 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6803cf35-bd54-4b83-a5b1-42cae252f98d-config-data\") pod \"placement-db-sync-g6z8k\" (UID: \"6803cf35-bd54-4b83-a5b1-42cae252f98d\") " pod="openstack/placement-db-sync-g6z8k" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.970684 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ktjcf\" (UniqueName: \"kubernetes.io/projected/6803cf35-bd54-4b83-a5b1-42cae252f98d-kube-api-access-ktjcf\") pod \"placement-db-sync-g6z8k\" (UID: \"6803cf35-bd54-4b83-a5b1-42cae252f98d\") " pod="openstack/placement-db-sync-g6z8k" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.970734 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xr57z\" (UniqueName: \"kubernetes.io/projected/1813822f-07d2-4a68-98bf-26cf5edd6707-kube-api-access-xr57z\") pod \"barbican-db-sync-b867g\" (UID: \"1813822f-07d2-4a68-98bf-26cf5edd6707\") " pod="openstack/barbican-db-sync-b867g" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.973252 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6803cf35-bd54-4b83-a5b1-42cae252f98d-logs\") pod \"placement-db-sync-g6z8k\" (UID: \"6803cf35-bd54-4b83-a5b1-42cae252f98d\") " pod="openstack/placement-db-sync-g6z8k" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.994728 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.994908 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.996073 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 11:27:41 crc kubenswrapper[4923]: I1128 11:27:41.998533 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/1813822f-07d2-4a68-98bf-26cf5edd6707-db-sync-config-data\") pod \"barbican-db-sync-b867g\" (UID: \"1813822f-07d2-4a68-98bf-26cf5edd6707\") " pod="openstack/barbican-db-sync-b867g" Nov 28 11:27:42 crc kubenswrapper[4923]: I1128 11:27:42.003837 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ktjcf\" (UniqueName: \"kubernetes.io/projected/6803cf35-bd54-4b83-a5b1-42cae252f98d-kube-api-access-ktjcf\") pod \"placement-db-sync-g6z8k\" (UID: \"6803cf35-bd54-4b83-a5b1-42cae252f98d\") " pod="openstack/placement-db-sync-g6z8k" Nov 28 11:27:42 crc kubenswrapper[4923]: I1128 11:27:42.012166 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xr57z\" (UniqueName: \"kubernetes.io/projected/1813822f-07d2-4a68-98bf-26cf5edd6707-kube-api-access-xr57z\") pod \"barbican-db-sync-b867g\" (UID: \"1813822f-07d2-4a68-98bf-26cf5edd6707\") " pod="openstack/barbican-db-sync-b867g" Nov 28 11:27:42 crc 
kubenswrapper[4923]: I1128 11:27:42.040075 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6803cf35-bd54-4b83-a5b1-42cae252f98d-scripts\") pod \"placement-db-sync-g6z8k\" (UID: \"6803cf35-bd54-4b83-a5b1-42cae252f98d\") " pod="openstack/placement-db-sync-g6z8k" Nov 28 11:27:42 crc kubenswrapper[4923]: I1128 11:27:42.041094 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1813822f-07d2-4a68-98bf-26cf5edd6707-combined-ca-bundle\") pod \"barbican-db-sync-b867g\" (UID: \"1813822f-07d2-4a68-98bf-26cf5edd6707\") " pod="openstack/barbican-db-sync-b867g" Nov 28 11:27:42 crc kubenswrapper[4923]: I1128 11:27:42.068988 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6803cf35-bd54-4b83-a5b1-42cae252f98d-combined-ca-bundle\") pod \"placement-db-sync-g6z8k\" (UID: \"6803cf35-bd54-4b83-a5b1-42cae252f98d\") " pod="openstack/placement-db-sync-g6z8k" Nov 28 11:27:42 crc kubenswrapper[4923]: I1128 11:27:42.071855 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b1a828ce-0622-4f74-937b-4341f9795501-ovsdbserver-nb\") pod \"dnsmasq-dns-7987f74bbc-9zsf6\" (UID: \"b1a828ce-0622-4f74-937b-4341f9795501\") " pod="openstack/dnsmasq-dns-7987f74bbc-9zsf6" Nov 28 11:27:42 crc kubenswrapper[4923]: I1128 11:27:42.071890 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2n5s\" (UniqueName: \"kubernetes.io/projected/b1a828ce-0622-4f74-937b-4341f9795501-kube-api-access-s2n5s\") pod \"dnsmasq-dns-7987f74bbc-9zsf6\" (UID: \"b1a828ce-0622-4f74-937b-4341f9795501\") " pod="openstack/dnsmasq-dns-7987f74bbc-9zsf6" Nov 28 11:27:42 crc kubenswrapper[4923]: I1128 11:27:42.071917 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5b557d79-c22c-47a7-b460-d65c25c2bca8-scripts\") pod \"ceilometer-0\" (UID: \"5b557d79-c22c-47a7-b460-d65c25c2bca8\") " pod="openstack/ceilometer-0" Nov 28 11:27:42 crc kubenswrapper[4923]: I1128 11:27:42.072031 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b1a828ce-0622-4f74-937b-4341f9795501-dns-svc\") pod \"dnsmasq-dns-7987f74bbc-9zsf6\" (UID: \"b1a828ce-0622-4f74-937b-4341f9795501\") " pod="openstack/dnsmasq-dns-7987f74bbc-9zsf6" Nov 28 11:27:42 crc kubenswrapper[4923]: I1128 11:27:42.072049 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5b557d79-c22c-47a7-b460-d65c25c2bca8-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"5b557d79-c22c-47a7-b460-d65c25c2bca8\") " pod="openstack/ceilometer-0" Nov 28 11:27:42 crc kubenswrapper[4923]: I1128 11:27:42.072075 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b1a828ce-0622-4f74-937b-4341f9795501-config\") pod \"dnsmasq-dns-7987f74bbc-9zsf6\" (UID: \"b1a828ce-0622-4f74-937b-4341f9795501\") " pod="openstack/dnsmasq-dns-7987f74bbc-9zsf6" Nov 28 11:27:42 crc kubenswrapper[4923]: I1128 11:27:42.072094 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" 
(UniqueName: \"kubernetes.io/secret/5b557d79-c22c-47a7-b460-d65c25c2bca8-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"5b557d79-c22c-47a7-b460-d65c25c2bca8\") " pod="openstack/ceilometer-0" Nov 28 11:27:42 crc kubenswrapper[4923]: I1128 11:27:42.072118 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4dlbl\" (UniqueName: \"kubernetes.io/projected/5b557d79-c22c-47a7-b460-d65c25c2bca8-kube-api-access-4dlbl\") pod \"ceilometer-0\" (UID: \"5b557d79-c22c-47a7-b460-d65c25c2bca8\") " pod="openstack/ceilometer-0" Nov 28 11:27:42 crc kubenswrapper[4923]: I1128 11:27:42.072139 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b1a828ce-0622-4f74-937b-4341f9795501-ovsdbserver-sb\") pod \"dnsmasq-dns-7987f74bbc-9zsf6\" (UID: \"b1a828ce-0622-4f74-937b-4341f9795501\") " pod="openstack/dnsmasq-dns-7987f74bbc-9zsf6" Nov 28 11:27:42 crc kubenswrapper[4923]: I1128 11:27:42.072152 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5b557d79-c22c-47a7-b460-d65c25c2bca8-log-httpd\") pod \"ceilometer-0\" (UID: \"5b557d79-c22c-47a7-b460-d65c25c2bca8\") " pod="openstack/ceilometer-0" Nov 28 11:27:42 crc kubenswrapper[4923]: I1128 11:27:42.072185 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5b557d79-c22c-47a7-b460-d65c25c2bca8-run-httpd\") pod \"ceilometer-0\" (UID: \"5b557d79-c22c-47a7-b460-d65c25c2bca8\") " pod="openstack/ceilometer-0" Nov 28 11:27:42 crc kubenswrapper[4923]: I1128 11:27:42.072200 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5b557d79-c22c-47a7-b460-d65c25c2bca8-config-data\") pod \"ceilometer-0\" (UID: \"5b557d79-c22c-47a7-b460-d65c25c2bca8\") " pod="openstack/ceilometer-0" Nov 28 11:27:42 crc kubenswrapper[4923]: I1128 11:27:42.072954 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b1a828ce-0622-4f74-937b-4341f9795501-ovsdbserver-nb\") pod \"dnsmasq-dns-7987f74bbc-9zsf6\" (UID: \"b1a828ce-0622-4f74-937b-4341f9795501\") " pod="openstack/dnsmasq-dns-7987f74bbc-9zsf6" Nov 28 11:27:42 crc kubenswrapper[4923]: I1128 11:27:42.073365 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b1a828ce-0622-4f74-937b-4341f9795501-config\") pod \"dnsmasq-dns-7987f74bbc-9zsf6\" (UID: \"b1a828ce-0622-4f74-937b-4341f9795501\") " pod="openstack/dnsmasq-dns-7987f74bbc-9zsf6" Nov 28 11:27:42 crc kubenswrapper[4923]: I1128 11:27:42.073486 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b1a828ce-0622-4f74-937b-4341f9795501-ovsdbserver-sb\") pod \"dnsmasq-dns-7987f74bbc-9zsf6\" (UID: \"b1a828ce-0622-4f74-937b-4341f9795501\") " pod="openstack/dnsmasq-dns-7987f74bbc-9zsf6" Nov 28 11:27:42 crc kubenswrapper[4923]: I1128 11:27:42.073958 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b1a828ce-0622-4f74-937b-4341f9795501-dns-svc\") pod \"dnsmasq-dns-7987f74bbc-9zsf6\" (UID: \"b1a828ce-0622-4f74-937b-4341f9795501\") " 
pod="openstack/dnsmasq-dns-7987f74bbc-9zsf6" Nov 28 11:27:42 crc kubenswrapper[4923]: I1128 11:27:42.075885 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6803cf35-bd54-4b83-a5b1-42cae252f98d-config-data\") pod \"placement-db-sync-g6z8k\" (UID: \"6803cf35-bd54-4b83-a5b1-42cae252f98d\") " pod="openstack/placement-db-sync-g6z8k" Nov 28 11:27:42 crc kubenswrapper[4923]: I1128 11:27:42.087582 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-s6twc" Nov 28 11:27:42 crc kubenswrapper[4923]: I1128 11:27:42.092909 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-zvrbl" Nov 28 11:27:42 crc kubenswrapper[4923]: I1128 11:27:42.112202 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2n5s\" (UniqueName: \"kubernetes.io/projected/b1a828ce-0622-4f74-937b-4341f9795501-kube-api-access-s2n5s\") pod \"dnsmasq-dns-7987f74bbc-9zsf6\" (UID: \"b1a828ce-0622-4f74-937b-4341f9795501\") " pod="openstack/dnsmasq-dns-7987f74bbc-9zsf6" Nov 28 11:27:42 crc kubenswrapper[4923]: I1128 11:27:42.155856 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-g6z8k" Nov 28 11:27:42 crc kubenswrapper[4923]: I1128 11:27:42.173864 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5b557d79-c22c-47a7-b460-d65c25c2bca8-scripts\") pod \"ceilometer-0\" (UID: \"5b557d79-c22c-47a7-b460-d65c25c2bca8\") " pod="openstack/ceilometer-0" Nov 28 11:27:42 crc kubenswrapper[4923]: I1128 11:27:42.173923 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5b557d79-c22c-47a7-b460-d65c25c2bca8-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"5b557d79-c22c-47a7-b460-d65c25c2bca8\") " pod="openstack/ceilometer-0" Nov 28 11:27:42 crc kubenswrapper[4923]: I1128 11:27:42.173960 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b557d79-c22c-47a7-b460-d65c25c2bca8-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"5b557d79-c22c-47a7-b460-d65c25c2bca8\") " pod="openstack/ceilometer-0" Nov 28 11:27:42 crc kubenswrapper[4923]: I1128 11:27:42.173983 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4dlbl\" (UniqueName: \"kubernetes.io/projected/5b557d79-c22c-47a7-b460-d65c25c2bca8-kube-api-access-4dlbl\") pod \"ceilometer-0\" (UID: \"5b557d79-c22c-47a7-b460-d65c25c2bca8\") " pod="openstack/ceilometer-0" Nov 28 11:27:42 crc kubenswrapper[4923]: I1128 11:27:42.174022 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5b557d79-c22c-47a7-b460-d65c25c2bca8-log-httpd\") pod \"ceilometer-0\" (UID: \"5b557d79-c22c-47a7-b460-d65c25c2bca8\") " pod="openstack/ceilometer-0" Nov 28 11:27:42 crc kubenswrapper[4923]: I1128 11:27:42.174048 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5b557d79-c22c-47a7-b460-d65c25c2bca8-run-httpd\") pod \"ceilometer-0\" (UID: \"5b557d79-c22c-47a7-b460-d65c25c2bca8\") " pod="openstack/ceilometer-0" Nov 28 11:27:42 crc kubenswrapper[4923]: I1128 11:27:42.174066 4923 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5b557d79-c22c-47a7-b460-d65c25c2bca8-config-data\") pod \"ceilometer-0\" (UID: \"5b557d79-c22c-47a7-b460-d65c25c2bca8\") " pod="openstack/ceilometer-0" Nov 28 11:27:42 crc kubenswrapper[4923]: I1128 11:27:42.177753 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5b557d79-c22c-47a7-b460-d65c25c2bca8-log-httpd\") pod \"ceilometer-0\" (UID: \"5b557d79-c22c-47a7-b460-d65c25c2bca8\") " pod="openstack/ceilometer-0" Nov 28 11:27:42 crc kubenswrapper[4923]: I1128 11:27:42.178284 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5b557d79-c22c-47a7-b460-d65c25c2bca8-run-httpd\") pod \"ceilometer-0\" (UID: \"5b557d79-c22c-47a7-b460-d65c25c2bca8\") " pod="openstack/ceilometer-0" Nov 28 11:27:42 crc kubenswrapper[4923]: I1128 11:27:42.179316 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5b557d79-c22c-47a7-b460-d65c25c2bca8-config-data\") pod \"ceilometer-0\" (UID: \"5b557d79-c22c-47a7-b460-d65c25c2bca8\") " pod="openstack/ceilometer-0" Nov 28 11:27:42 crc kubenswrapper[4923]: I1128 11:27:42.183792 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b557d79-c22c-47a7-b460-d65c25c2bca8-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"5b557d79-c22c-47a7-b460-d65c25c2bca8\") " pod="openstack/ceilometer-0" Nov 28 11:27:42 crc kubenswrapper[4923]: I1128 11:27:42.184466 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-b867g" Nov 28 11:27:42 crc kubenswrapper[4923]: I1128 11:27:42.206660 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5b557d79-c22c-47a7-b460-d65c25c2bca8-scripts\") pod \"ceilometer-0\" (UID: \"5b557d79-c22c-47a7-b460-d65c25c2bca8\") " pod="openstack/ceilometer-0" Nov 28 11:27:42 crc kubenswrapper[4923]: I1128 11:27:42.209756 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5b557d79-c22c-47a7-b460-d65c25c2bca8-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"5b557d79-c22c-47a7-b460-d65c25c2bca8\") " pod="openstack/ceilometer-0" Nov 28 11:27:42 crc kubenswrapper[4923]: I1128 11:27:42.215115 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4dlbl\" (UniqueName: \"kubernetes.io/projected/5b557d79-c22c-47a7-b460-d65c25c2bca8-kube-api-access-4dlbl\") pod \"ceilometer-0\" (UID: \"5b557d79-c22c-47a7-b460-d65c25c2bca8\") " pod="openstack/ceilometer-0" Nov 28 11:27:42 crc kubenswrapper[4923]: I1128 11:27:42.284164 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7987f74bbc-9zsf6" Nov 28 11:27:42 crc kubenswrapper[4923]: I1128 11:27:42.363879 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 11:27:42 crc kubenswrapper[4923]: I1128 11:27:42.551721 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-8v4gc"] Nov 28 11:27:42 crc kubenswrapper[4923]: I1128 11:27:42.685418 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6546db6db7-9twqd"] Nov 28 11:27:42 crc kubenswrapper[4923]: W1128 11:27:42.691318 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda7c26baf_9942_4ffd_bfd4_b22b4a631307.slice/crio-be27a302ab452a9ff1b33392baf1eba2d4f5455598702d3ddce29f198df35bd2 WatchSource:0}: Error finding container be27a302ab452a9ff1b33392baf1eba2d4f5455598702d3ddce29f198df35bd2: Status 404 returned error can't find the container with id be27a302ab452a9ff1b33392baf1eba2d4f5455598702d3ddce29f198df35bd2 Nov 28 11:27:42 crc kubenswrapper[4923]: I1128 11:27:42.963789 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-g6z8k"] Nov 28 11:27:43 crc kubenswrapper[4923]: I1128 11:27:43.047032 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-8v4gc" event={"ID":"c462cd46-ea0b-4567-a592-d43b436767e5","Type":"ContainerStarted","Data":"4b15e1097deecb6619c96df00fad4a648519b7888f64d9a3a2fecaa4d493067a"} Nov 28 11:27:43 crc kubenswrapper[4923]: I1128 11:27:43.053403 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-g6z8k" event={"ID":"6803cf35-bd54-4b83-a5b1-42cae252f98d","Type":"ContainerStarted","Data":"0afa88dba5db741d871387adfe50f94848c6ebaff0b7f04d22133683833b5744"} Nov 28 11:27:43 crc kubenswrapper[4923]: I1128 11:27:43.054563 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6546db6db7-9twqd" event={"ID":"a7c26baf-9942-4ffd-bfd4-b22b4a631307","Type":"ContainerStarted","Data":"be27a302ab452a9ff1b33392baf1eba2d4f5455598702d3ddce29f198df35bd2"} Nov 28 11:27:43 crc kubenswrapper[4923]: I1128 11:27:43.089468 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-s6twc"] Nov 28 11:27:43 crc kubenswrapper[4923]: I1128 11:27:43.102147 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7987f74bbc-9zsf6"] Nov 28 11:27:43 crc kubenswrapper[4923]: I1128 11:27:43.132727 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 11:27:43 crc kubenswrapper[4923]: W1128 11:27:43.185551 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5b557d79_c22c_47a7_b460_d65c25c2bca8.slice/crio-dcff9cdb1a48423ca510ed75fc200d4c1f20c04e74ca02ec6d7b1627c5ec8ed4 WatchSource:0}: Error finding container dcff9cdb1a48423ca510ed75fc200d4c1f20c04e74ca02ec6d7b1627c5ec8ed4: Status 404 returned error can't find the container with id dcff9cdb1a48423ca510ed75fc200d4c1f20c04e74ca02ec6d7b1627c5ec8ed4 Nov 28 11:27:43 crc kubenswrapper[4923]: I1128 11:27:43.459397 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-b867g"] Nov 28 11:27:43 crc kubenswrapper[4923]: I1128 11:27:43.472358 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-zvrbl"] Nov 28 11:27:43 crc kubenswrapper[4923]: I1128 11:27:43.808122 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 11:27:44 crc kubenswrapper[4923]: I1128 11:27:44.027998 4923 
patch_prober.go:28] interesting pod/machine-config-daemon-bwdth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 11:27:44 crc kubenswrapper[4923]: I1128 11:27:44.028034 4923 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 11:27:44 crc kubenswrapper[4923]: I1128 11:27:44.070261 4923 generic.go:334] "Generic (PLEG): container finished" podID="a7c26baf-9942-4ffd-bfd4-b22b4a631307" containerID="00838f825e38a4f67a8d4c817d092353ff9dc921b6453e371169cf2207ba9771" exitCode=0 Nov 28 11:27:44 crc kubenswrapper[4923]: I1128 11:27:44.070325 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6546db6db7-9twqd" event={"ID":"a7c26baf-9942-4ffd-bfd4-b22b4a631307","Type":"ContainerDied","Data":"00838f825e38a4f67a8d4c817d092353ff9dc921b6453e371169cf2207ba9771"} Nov 28 11:27:44 crc kubenswrapper[4923]: I1128 11:27:44.077152 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-8v4gc" event={"ID":"c462cd46-ea0b-4567-a592-d43b436767e5","Type":"ContainerStarted","Data":"a1f0106d44dc872d919ac5ba42c4dcec113aec34c0d39ab29a15fdb9693351f6"} Nov 28 11:27:44 crc kubenswrapper[4923]: I1128 11:27:44.080883 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-zvrbl" event={"ID":"ab7a5c22-b1d8-49e8-9420-25485e5dabd7","Type":"ContainerStarted","Data":"d631b6e7278073e1c01f185892c7cc951eeb27d682c41809be4d3a9ba5ef9f10"} Nov 28 11:27:44 crc kubenswrapper[4923]: I1128 11:27:44.080906 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-zvrbl" event={"ID":"ab7a5c22-b1d8-49e8-9420-25485e5dabd7","Type":"ContainerStarted","Data":"9d6b15780970d7af436062dd1ee70d7ff274ed566848b9040ac7340f3decf6ea"} Nov 28 11:27:44 crc kubenswrapper[4923]: I1128 11:27:44.082200 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-s6twc" event={"ID":"4c7bc447-b1f7-4e68-b0da-310515aecea9","Type":"ContainerStarted","Data":"89f6e0510e742e0a877dbf339933cfb201113596e86cd286824092f591098acc"} Nov 28 11:27:44 crc kubenswrapper[4923]: I1128 11:27:44.083087 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-b867g" event={"ID":"1813822f-07d2-4a68-98bf-26cf5edd6707","Type":"ContainerStarted","Data":"48704da40d1c67e86667ac5e14171583b066986ab9e16bd867eeb48a187b1be4"} Nov 28 11:27:44 crc kubenswrapper[4923]: I1128 11:27:44.111506 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5b557d79-c22c-47a7-b460-d65c25c2bca8","Type":"ContainerStarted","Data":"dcff9cdb1a48423ca510ed75fc200d4c1f20c04e74ca02ec6d7b1627c5ec8ed4"} Nov 28 11:27:44 crc kubenswrapper[4923]: I1128 11:27:44.113491 4923 generic.go:334] "Generic (PLEG): container finished" podID="b1a828ce-0622-4f74-937b-4341f9795501" containerID="e98a43c2efd680b1f3408870f7b66c3d65b8514cb7499f2b890569825c2c10da" exitCode=0 Nov 28 11:27:44 crc kubenswrapper[4923]: I1128 11:27:44.113515 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7987f74bbc-9zsf6" 
event={"ID":"b1a828ce-0622-4f74-937b-4341f9795501","Type":"ContainerDied","Data":"e98a43c2efd680b1f3408870f7b66c3d65b8514cb7499f2b890569825c2c10da"} Nov 28 11:27:44 crc kubenswrapper[4923]: I1128 11:27:44.113528 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7987f74bbc-9zsf6" event={"ID":"b1a828ce-0622-4f74-937b-4341f9795501","Type":"ContainerStarted","Data":"8d6b9f803496115d1a0e0bcbc0f032afca6d7d45f1ce8d53fc435628e61b81e5"} Nov 28 11:27:44 crc kubenswrapper[4923]: I1128 11:27:44.114328 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-8v4gc" podStartSLOduration=3.114314525 podStartE2EDuration="3.114314525s" podCreationTimestamp="2025-11-28 11:27:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:27:44.103827518 +0000 UTC m=+1143.232511728" watchObservedRunningTime="2025-11-28 11:27:44.114314525 +0000 UTC m=+1143.242998735" Nov 28 11:27:44 crc kubenswrapper[4923]: I1128 11:27:44.152084 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-zvrbl" podStartSLOduration=3.152065064 podStartE2EDuration="3.152065064s" podCreationTimestamp="2025-11-28 11:27:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:27:44.119419969 +0000 UTC m=+1143.248104179" watchObservedRunningTime="2025-11-28 11:27:44.152065064 +0000 UTC m=+1143.280749274" Nov 28 11:27:44 crc kubenswrapper[4923]: I1128 11:27:44.458413 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6546db6db7-9twqd" Nov 28 11:27:44 crc kubenswrapper[4923]: I1128 11:27:44.521996 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-87dm4\" (UniqueName: \"kubernetes.io/projected/a7c26baf-9942-4ffd-bfd4-b22b4a631307-kube-api-access-87dm4\") pod \"a7c26baf-9942-4ffd-bfd4-b22b4a631307\" (UID: \"a7c26baf-9942-4ffd-bfd4-b22b4a631307\") " Nov 28 11:27:44 crc kubenswrapper[4923]: I1128 11:27:44.522153 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a7c26baf-9942-4ffd-bfd4-b22b4a631307-ovsdbserver-sb\") pod \"a7c26baf-9942-4ffd-bfd4-b22b4a631307\" (UID: \"a7c26baf-9942-4ffd-bfd4-b22b4a631307\") " Nov 28 11:27:44 crc kubenswrapper[4923]: I1128 11:27:44.522216 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a7c26baf-9942-4ffd-bfd4-b22b4a631307-dns-svc\") pod \"a7c26baf-9942-4ffd-bfd4-b22b4a631307\" (UID: \"a7c26baf-9942-4ffd-bfd4-b22b4a631307\") " Nov 28 11:27:44 crc kubenswrapper[4923]: I1128 11:27:44.522243 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a7c26baf-9942-4ffd-bfd4-b22b4a631307-ovsdbserver-nb\") pod \"a7c26baf-9942-4ffd-bfd4-b22b4a631307\" (UID: \"a7c26baf-9942-4ffd-bfd4-b22b4a631307\") " Nov 28 11:27:44 crc kubenswrapper[4923]: I1128 11:27:44.522321 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a7c26baf-9942-4ffd-bfd4-b22b4a631307-config\") pod \"a7c26baf-9942-4ffd-bfd4-b22b4a631307\" (UID: \"a7c26baf-9942-4ffd-bfd4-b22b4a631307\") " Nov 28 11:27:44 
crc kubenswrapper[4923]: I1128 11:27:44.527389 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a7c26baf-9942-4ffd-bfd4-b22b4a631307-kube-api-access-87dm4" (OuterVolumeSpecName: "kube-api-access-87dm4") pod "a7c26baf-9942-4ffd-bfd4-b22b4a631307" (UID: "a7c26baf-9942-4ffd-bfd4-b22b4a631307"). InnerVolumeSpecName "kube-api-access-87dm4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:27:44 crc kubenswrapper[4923]: I1128 11:27:44.546098 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a7c26baf-9942-4ffd-bfd4-b22b4a631307-config" (OuterVolumeSpecName: "config") pod "a7c26baf-9942-4ffd-bfd4-b22b4a631307" (UID: "a7c26baf-9942-4ffd-bfd4-b22b4a631307"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:27:44 crc kubenswrapper[4923]: I1128 11:27:44.562501 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a7c26baf-9942-4ffd-bfd4-b22b4a631307-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "a7c26baf-9942-4ffd-bfd4-b22b4a631307" (UID: "a7c26baf-9942-4ffd-bfd4-b22b4a631307"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:27:44 crc kubenswrapper[4923]: I1128 11:27:44.562558 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a7c26baf-9942-4ffd-bfd4-b22b4a631307-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "a7c26baf-9942-4ffd-bfd4-b22b4a631307" (UID: "a7c26baf-9942-4ffd-bfd4-b22b4a631307"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:27:44 crc kubenswrapper[4923]: I1128 11:27:44.566415 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a7c26baf-9942-4ffd-bfd4-b22b4a631307-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "a7c26baf-9942-4ffd-bfd4-b22b4a631307" (UID: "a7c26baf-9942-4ffd-bfd4-b22b4a631307"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:27:44 crc kubenswrapper[4923]: I1128 11:27:44.624342 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-87dm4\" (UniqueName: \"kubernetes.io/projected/a7c26baf-9942-4ffd-bfd4-b22b4a631307-kube-api-access-87dm4\") on node \"crc\" DevicePath \"\"" Nov 28 11:27:44 crc kubenswrapper[4923]: I1128 11:27:44.624586 4923 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a7c26baf-9942-4ffd-bfd4-b22b4a631307-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 11:27:44 crc kubenswrapper[4923]: I1128 11:27:44.624600 4923 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a7c26baf-9942-4ffd-bfd4-b22b4a631307-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 11:27:44 crc kubenswrapper[4923]: I1128 11:27:44.624608 4923 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a7c26baf-9942-4ffd-bfd4-b22b4a631307-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 11:27:44 crc kubenswrapper[4923]: I1128 11:27:44.624617 4923 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a7c26baf-9942-4ffd-bfd4-b22b4a631307-config\") on node \"crc\" DevicePath \"\"" Nov 28 11:27:45 crc kubenswrapper[4923]: I1128 11:27:45.167795 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7987f74bbc-9zsf6" event={"ID":"b1a828ce-0622-4f74-937b-4341f9795501","Type":"ContainerStarted","Data":"29e3641c4f6b0a9e94db8d56755d610f4bc6a2cf92e3ad1ee60195b37b1e3e19"} Nov 28 11:27:45 crc kubenswrapper[4923]: I1128 11:27:45.168802 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7987f74bbc-9zsf6" Nov 28 11:27:45 crc kubenswrapper[4923]: I1128 11:27:45.174856 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6546db6db7-9twqd" Nov 28 11:27:45 crc kubenswrapper[4923]: I1128 11:27:45.189279 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7987f74bbc-9zsf6" podStartSLOduration=4.189245841 podStartE2EDuration="4.189245841s" podCreationTimestamp="2025-11-28 11:27:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:27:45.186652838 +0000 UTC m=+1144.315337058" watchObservedRunningTime="2025-11-28 11:27:45.189245841 +0000 UTC m=+1144.317930071" Nov 28 11:27:45 crc kubenswrapper[4923]: I1128 11:27:45.196961 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6546db6db7-9twqd" event={"ID":"a7c26baf-9942-4ffd-bfd4-b22b4a631307","Type":"ContainerDied","Data":"be27a302ab452a9ff1b33392baf1eba2d4f5455598702d3ddce29f198df35bd2"} Nov 28 11:27:45 crc kubenswrapper[4923]: I1128 11:27:45.197075 4923 scope.go:117] "RemoveContainer" containerID="00838f825e38a4f67a8d4c817d092353ff9dc921b6453e371169cf2207ba9771" Nov 28 11:27:45 crc kubenswrapper[4923]: I1128 11:27:45.260727 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6546db6db7-9twqd"] Nov 28 11:27:45 crc kubenswrapper[4923]: I1128 11:27:45.267428 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6546db6db7-9twqd"] Nov 28 11:27:47 crc kubenswrapper[4923]: I1128 11:27:47.185103 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a7c26baf-9942-4ffd-bfd4-b22b4a631307" path="/var/lib/kubelet/pods/a7c26baf-9942-4ffd-bfd4-b22b4a631307/volumes" Nov 28 11:27:49 crc kubenswrapper[4923]: I1128 11:27:49.211562 4923 generic.go:334] "Generic (PLEG): container finished" podID="c462cd46-ea0b-4567-a592-d43b436767e5" containerID="a1f0106d44dc872d919ac5ba42c4dcec113aec34c0d39ab29a15fdb9693351f6" exitCode=0 Nov 28 11:27:49 crc kubenswrapper[4923]: I1128 11:27:49.211695 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-8v4gc" event={"ID":"c462cd46-ea0b-4567-a592-d43b436767e5","Type":"ContainerDied","Data":"a1f0106d44dc872d919ac5ba42c4dcec113aec34c0d39ab29a15fdb9693351f6"} Nov 28 11:27:52 crc kubenswrapper[4923]: I1128 11:27:52.286113 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-7987f74bbc-9zsf6" Nov 28 11:27:52 crc kubenswrapper[4923]: I1128 11:27:52.351575 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-54f9b7b8d9-d9ssw"] Nov 28 11:27:52 crc kubenswrapper[4923]: I1128 11:27:52.351782 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-54f9b7b8d9-d9ssw" podUID="136fe185-8544-42b9-92df-b0c42d04a4fb" containerName="dnsmasq-dns" containerID="cri-o://7769ff15375ac0ae74c80e592f829f4efcbe158a002c162c498c8ae893bd286e" gracePeriod=10 Nov 28 11:27:52 crc kubenswrapper[4923]: I1128 11:27:52.471157 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-8v4gc" Nov 28 11:27:52 crc kubenswrapper[4923]: I1128 11:27:52.585385 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c462cd46-ea0b-4567-a592-d43b436767e5-config-data\") pod \"c462cd46-ea0b-4567-a592-d43b436767e5\" (UID: \"c462cd46-ea0b-4567-a592-d43b436767e5\") " Nov 28 11:27:52 crc kubenswrapper[4923]: I1128 11:27:52.585531 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/c462cd46-ea0b-4567-a592-d43b436767e5-credential-keys\") pod \"c462cd46-ea0b-4567-a592-d43b436767e5\" (UID: \"c462cd46-ea0b-4567-a592-d43b436767e5\") " Nov 28 11:27:52 crc kubenswrapper[4923]: I1128 11:27:52.585557 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7bffr\" (UniqueName: \"kubernetes.io/projected/c462cd46-ea0b-4567-a592-d43b436767e5-kube-api-access-7bffr\") pod \"c462cd46-ea0b-4567-a592-d43b436767e5\" (UID: \"c462cd46-ea0b-4567-a592-d43b436767e5\") " Nov 28 11:27:52 crc kubenswrapper[4923]: I1128 11:27:52.585629 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/c462cd46-ea0b-4567-a592-d43b436767e5-fernet-keys\") pod \"c462cd46-ea0b-4567-a592-d43b436767e5\" (UID: \"c462cd46-ea0b-4567-a592-d43b436767e5\") " Nov 28 11:27:52 crc kubenswrapper[4923]: I1128 11:27:52.585647 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c462cd46-ea0b-4567-a592-d43b436767e5-combined-ca-bundle\") pod \"c462cd46-ea0b-4567-a592-d43b436767e5\" (UID: \"c462cd46-ea0b-4567-a592-d43b436767e5\") " Nov 28 11:27:52 crc kubenswrapper[4923]: I1128 11:27:52.585722 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c462cd46-ea0b-4567-a592-d43b436767e5-scripts\") pod \"c462cd46-ea0b-4567-a592-d43b436767e5\" (UID: \"c462cd46-ea0b-4567-a592-d43b436767e5\") " Nov 28 11:27:52 crc kubenswrapper[4923]: I1128 11:27:52.592037 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c462cd46-ea0b-4567-a592-d43b436767e5-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "c462cd46-ea0b-4567-a592-d43b436767e5" (UID: "c462cd46-ea0b-4567-a592-d43b436767e5"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:27:52 crc kubenswrapper[4923]: I1128 11:27:52.594157 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c462cd46-ea0b-4567-a592-d43b436767e5-kube-api-access-7bffr" (OuterVolumeSpecName: "kube-api-access-7bffr") pod "c462cd46-ea0b-4567-a592-d43b436767e5" (UID: "c462cd46-ea0b-4567-a592-d43b436767e5"). InnerVolumeSpecName "kube-api-access-7bffr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:27:52 crc kubenswrapper[4923]: I1128 11:27:52.591924 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c462cd46-ea0b-4567-a592-d43b436767e5-scripts" (OuterVolumeSpecName: "scripts") pod "c462cd46-ea0b-4567-a592-d43b436767e5" (UID: "c462cd46-ea0b-4567-a592-d43b436767e5"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:27:52 crc kubenswrapper[4923]: I1128 11:27:52.607024 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c462cd46-ea0b-4567-a592-d43b436767e5-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "c462cd46-ea0b-4567-a592-d43b436767e5" (UID: "c462cd46-ea0b-4567-a592-d43b436767e5"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:27:52 crc kubenswrapper[4923]: I1128 11:27:52.617450 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c462cd46-ea0b-4567-a592-d43b436767e5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c462cd46-ea0b-4567-a592-d43b436767e5" (UID: "c462cd46-ea0b-4567-a592-d43b436767e5"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:27:52 crc kubenswrapper[4923]: I1128 11:27:52.624265 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c462cd46-ea0b-4567-a592-d43b436767e5-config-data" (OuterVolumeSpecName: "config-data") pod "c462cd46-ea0b-4567-a592-d43b436767e5" (UID: "c462cd46-ea0b-4567-a592-d43b436767e5"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:27:52 crc kubenswrapper[4923]: I1128 11:27:52.688077 4923 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c462cd46-ea0b-4567-a592-d43b436767e5-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 11:27:52 crc kubenswrapper[4923]: I1128 11:27:52.688109 4923 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c462cd46-ea0b-4567-a592-d43b436767e5-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 11:27:52 crc kubenswrapper[4923]: I1128 11:27:52.688120 4923 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/c462cd46-ea0b-4567-a592-d43b436767e5-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 28 11:27:52 crc kubenswrapper[4923]: I1128 11:27:52.688132 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7bffr\" (UniqueName: \"kubernetes.io/projected/c462cd46-ea0b-4567-a592-d43b436767e5-kube-api-access-7bffr\") on node \"crc\" DevicePath \"\"" Nov 28 11:27:52 crc kubenswrapper[4923]: I1128 11:27:52.688140 4923 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/c462cd46-ea0b-4567-a592-d43b436767e5-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 28 11:27:52 crc kubenswrapper[4923]: I1128 11:27:52.688147 4923 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c462cd46-ea0b-4567-a592-d43b436767e5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 11:27:53 crc kubenswrapper[4923]: I1128 11:27:53.266749 4923 generic.go:334] "Generic (PLEG): container finished" podID="136fe185-8544-42b9-92df-b0c42d04a4fb" containerID="7769ff15375ac0ae74c80e592f829f4efcbe158a002c162c498c8ae893bd286e" exitCode=0 Nov 28 11:27:53 crc kubenswrapper[4923]: I1128 11:27:53.266949 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54f9b7b8d9-d9ssw" event={"ID":"136fe185-8544-42b9-92df-b0c42d04a4fb","Type":"ContainerDied","Data":"7769ff15375ac0ae74c80e592f829f4efcbe158a002c162c498c8ae893bd286e"} Nov 28 11:27:53 crc 
kubenswrapper[4923]: I1128 11:27:53.273413 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-8v4gc" event={"ID":"c462cd46-ea0b-4567-a592-d43b436767e5","Type":"ContainerDied","Data":"4b15e1097deecb6619c96df00fad4a648519b7888f64d9a3a2fecaa4d493067a"} Nov 28 11:27:53 crc kubenswrapper[4923]: I1128 11:27:53.273452 4923 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4b15e1097deecb6619c96df00fad4a648519b7888f64d9a3a2fecaa4d493067a" Nov 28 11:27:53 crc kubenswrapper[4923]: I1128 11:27:53.273510 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-8v4gc" Nov 28 11:27:53 crc kubenswrapper[4923]: I1128 11:27:53.582041 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-8v4gc"] Nov 28 11:27:53 crc kubenswrapper[4923]: I1128 11:27:53.582095 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-8v4gc"] Nov 28 11:27:53 crc kubenswrapper[4923]: I1128 11:27:53.691455 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-dfhlh"] Nov 28 11:27:53 crc kubenswrapper[4923]: E1128 11:27:53.692050 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c462cd46-ea0b-4567-a592-d43b436767e5" containerName="keystone-bootstrap" Nov 28 11:27:53 crc kubenswrapper[4923]: I1128 11:27:53.692145 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="c462cd46-ea0b-4567-a592-d43b436767e5" containerName="keystone-bootstrap" Nov 28 11:27:53 crc kubenswrapper[4923]: E1128 11:27:53.692243 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a7c26baf-9942-4ffd-bfd4-b22b4a631307" containerName="init" Nov 28 11:27:53 crc kubenswrapper[4923]: I1128 11:27:53.692313 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="a7c26baf-9942-4ffd-bfd4-b22b4a631307" containerName="init" Nov 28 11:27:53 crc kubenswrapper[4923]: I1128 11:27:53.692553 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="c462cd46-ea0b-4567-a592-d43b436767e5" containerName="keystone-bootstrap" Nov 28 11:27:53 crc kubenswrapper[4923]: I1128 11:27:53.692686 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="a7c26baf-9942-4ffd-bfd4-b22b4a631307" containerName="init" Nov 28 11:27:53 crc kubenswrapper[4923]: I1128 11:27:53.693307 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-dfhlh" Nov 28 11:27:53 crc kubenswrapper[4923]: I1128 11:27:53.701097 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-4f87p" Nov 28 11:27:53 crc kubenswrapper[4923]: I1128 11:27:53.701272 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 28 11:27:53 crc kubenswrapper[4923]: I1128 11:27:53.701221 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 28 11:27:53 crc kubenswrapper[4923]: I1128 11:27:53.701611 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 28 11:27:53 crc kubenswrapper[4923]: I1128 11:27:53.701795 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 28 11:27:53 crc kubenswrapper[4923]: I1128 11:27:53.739907 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-dfhlh"] Nov 28 11:27:53 crc kubenswrapper[4923]: I1128 11:27:53.806983 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4173396e-acf1-469e-9c63-4a02a2a1692b-config-data\") pod \"keystone-bootstrap-dfhlh\" (UID: \"4173396e-acf1-469e-9c63-4a02a2a1692b\") " pod="openstack/keystone-bootstrap-dfhlh" Nov 28 11:27:53 crc kubenswrapper[4923]: I1128 11:27:53.807017 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4173396e-acf1-469e-9c63-4a02a2a1692b-combined-ca-bundle\") pod \"keystone-bootstrap-dfhlh\" (UID: \"4173396e-acf1-469e-9c63-4a02a2a1692b\") " pod="openstack/keystone-bootstrap-dfhlh" Nov 28 11:27:53 crc kubenswrapper[4923]: I1128 11:27:53.807060 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4173396e-acf1-469e-9c63-4a02a2a1692b-scripts\") pod \"keystone-bootstrap-dfhlh\" (UID: \"4173396e-acf1-469e-9c63-4a02a2a1692b\") " pod="openstack/keystone-bootstrap-dfhlh" Nov 28 11:27:53 crc kubenswrapper[4923]: I1128 11:27:53.807091 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/4173396e-acf1-469e-9c63-4a02a2a1692b-fernet-keys\") pod \"keystone-bootstrap-dfhlh\" (UID: \"4173396e-acf1-469e-9c63-4a02a2a1692b\") " pod="openstack/keystone-bootstrap-dfhlh" Nov 28 11:27:53 crc kubenswrapper[4923]: I1128 11:27:53.807135 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/4173396e-acf1-469e-9c63-4a02a2a1692b-credential-keys\") pod \"keystone-bootstrap-dfhlh\" (UID: \"4173396e-acf1-469e-9c63-4a02a2a1692b\") " pod="openstack/keystone-bootstrap-dfhlh" Nov 28 11:27:53 crc kubenswrapper[4923]: I1128 11:27:53.807152 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7jlfn\" (UniqueName: \"kubernetes.io/projected/4173396e-acf1-469e-9c63-4a02a2a1692b-kube-api-access-7jlfn\") pod \"keystone-bootstrap-dfhlh\" (UID: \"4173396e-acf1-469e-9c63-4a02a2a1692b\") " pod="openstack/keystone-bootstrap-dfhlh" Nov 28 11:27:53 crc kubenswrapper[4923]: I1128 11:27:53.908412 4923 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4173396e-acf1-469e-9c63-4a02a2a1692b-config-data\") pod \"keystone-bootstrap-dfhlh\" (UID: \"4173396e-acf1-469e-9c63-4a02a2a1692b\") " pod="openstack/keystone-bootstrap-dfhlh" Nov 28 11:27:53 crc kubenswrapper[4923]: I1128 11:27:53.908458 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4173396e-acf1-469e-9c63-4a02a2a1692b-combined-ca-bundle\") pod \"keystone-bootstrap-dfhlh\" (UID: \"4173396e-acf1-469e-9c63-4a02a2a1692b\") " pod="openstack/keystone-bootstrap-dfhlh" Nov 28 11:27:53 crc kubenswrapper[4923]: I1128 11:27:53.908506 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4173396e-acf1-469e-9c63-4a02a2a1692b-scripts\") pod \"keystone-bootstrap-dfhlh\" (UID: \"4173396e-acf1-469e-9c63-4a02a2a1692b\") " pod="openstack/keystone-bootstrap-dfhlh" Nov 28 11:27:53 crc kubenswrapper[4923]: I1128 11:27:53.908543 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/4173396e-acf1-469e-9c63-4a02a2a1692b-fernet-keys\") pod \"keystone-bootstrap-dfhlh\" (UID: \"4173396e-acf1-469e-9c63-4a02a2a1692b\") " pod="openstack/keystone-bootstrap-dfhlh" Nov 28 11:27:53 crc kubenswrapper[4923]: I1128 11:27:53.908595 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/4173396e-acf1-469e-9c63-4a02a2a1692b-credential-keys\") pod \"keystone-bootstrap-dfhlh\" (UID: \"4173396e-acf1-469e-9c63-4a02a2a1692b\") " pod="openstack/keystone-bootstrap-dfhlh" Nov 28 11:27:53 crc kubenswrapper[4923]: I1128 11:27:53.908616 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7jlfn\" (UniqueName: \"kubernetes.io/projected/4173396e-acf1-469e-9c63-4a02a2a1692b-kube-api-access-7jlfn\") pod \"keystone-bootstrap-dfhlh\" (UID: \"4173396e-acf1-469e-9c63-4a02a2a1692b\") " pod="openstack/keystone-bootstrap-dfhlh" Nov 28 11:27:53 crc kubenswrapper[4923]: I1128 11:27:53.912729 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4173396e-acf1-469e-9c63-4a02a2a1692b-scripts\") pod \"keystone-bootstrap-dfhlh\" (UID: \"4173396e-acf1-469e-9c63-4a02a2a1692b\") " pod="openstack/keystone-bootstrap-dfhlh" Nov 28 11:27:53 crc kubenswrapper[4923]: I1128 11:27:53.916581 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/4173396e-acf1-469e-9c63-4a02a2a1692b-fernet-keys\") pod \"keystone-bootstrap-dfhlh\" (UID: \"4173396e-acf1-469e-9c63-4a02a2a1692b\") " pod="openstack/keystone-bootstrap-dfhlh" Nov 28 11:27:53 crc kubenswrapper[4923]: I1128 11:27:53.916691 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4173396e-acf1-469e-9c63-4a02a2a1692b-combined-ca-bundle\") pod \"keystone-bootstrap-dfhlh\" (UID: \"4173396e-acf1-469e-9c63-4a02a2a1692b\") " pod="openstack/keystone-bootstrap-dfhlh" Nov 28 11:27:53 crc kubenswrapper[4923]: I1128 11:27:53.933005 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7jlfn\" (UniqueName: \"kubernetes.io/projected/4173396e-acf1-469e-9c63-4a02a2a1692b-kube-api-access-7jlfn\") pod \"keystone-bootstrap-dfhlh\" (UID: 
\"4173396e-acf1-469e-9c63-4a02a2a1692b\") " pod="openstack/keystone-bootstrap-dfhlh" Nov 28 11:27:53 crc kubenswrapper[4923]: I1128 11:27:53.936630 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/4173396e-acf1-469e-9c63-4a02a2a1692b-credential-keys\") pod \"keystone-bootstrap-dfhlh\" (UID: \"4173396e-acf1-469e-9c63-4a02a2a1692b\") " pod="openstack/keystone-bootstrap-dfhlh" Nov 28 11:27:53 crc kubenswrapper[4923]: I1128 11:27:53.958724 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4173396e-acf1-469e-9c63-4a02a2a1692b-config-data\") pod \"keystone-bootstrap-dfhlh\" (UID: \"4173396e-acf1-469e-9c63-4a02a2a1692b\") " pod="openstack/keystone-bootstrap-dfhlh" Nov 28 11:27:54 crc kubenswrapper[4923]: I1128 11:27:54.080982 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-dfhlh" Nov 28 11:27:54 crc kubenswrapper[4923]: I1128 11:27:54.700769 4923 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-54f9b7b8d9-d9ssw" podUID="136fe185-8544-42b9-92df-b0c42d04a4fb" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.127:5353: connect: connection refused" Nov 28 11:27:55 crc kubenswrapper[4923]: I1128 11:27:55.178441 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c462cd46-ea0b-4567-a592-d43b436767e5" path="/var/lib/kubelet/pods/c462cd46-ea0b-4567-a592-d43b436767e5/volumes" Nov 28 11:27:59 crc kubenswrapper[4923]: I1128 11:27:59.700033 4923 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-54f9b7b8d9-d9ssw" podUID="136fe185-8544-42b9-92df-b0c42d04a4fb" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.127:5353: connect: connection refused" Nov 28 11:28:02 crc kubenswrapper[4923]: I1128 11:28:02.354601 4923 generic.go:334] "Generic (PLEG): container finished" podID="ab7a5c22-b1d8-49e8-9420-25485e5dabd7" containerID="d631b6e7278073e1c01f185892c7cc951eeb27d682c41809be4d3a9ba5ef9f10" exitCode=0 Nov 28 11:28:02 crc kubenswrapper[4923]: I1128 11:28:02.354677 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-zvrbl" event={"ID":"ab7a5c22-b1d8-49e8-9420-25485e5dabd7","Type":"ContainerDied","Data":"d631b6e7278073e1c01f185892c7cc951eeb27d682c41809be4d3a9ba5ef9f10"} Nov 28 11:28:03 crc kubenswrapper[4923]: E1128 11:28:03.769870 4923 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified" Nov 28 11:28:03 crc kubenswrapper[4923]: E1128 11:28:03.770335 4923 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cinder-db-sync,Image:quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && 
/usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-machine-id,ReadOnly:true,MountPath:/etc/machine-id,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/merged,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/cinder/cinder.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-qldm2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-db-sync-s6twc_openstack(4c7bc447-b1f7-4e68-b0da-310515aecea9): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 11:28:03 crc kubenswrapper[4923]: E1128 11:28:03.771541 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/cinder-db-sync-s6twc" podUID="4c7bc447-b1f7-4e68-b0da-310515aecea9" Nov 28 11:28:04 crc kubenswrapper[4923]: E1128 11:28:04.379406 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified\\\"\"" pod="openstack/cinder-db-sync-s6twc" podUID="4c7bc447-b1f7-4e68-b0da-310515aecea9" Nov 28 11:28:04 crc kubenswrapper[4923]: E1128 11:28:04.635906 4923 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified" Nov 28 11:28:04 crc kubenswrapper[4923]: E1128 11:28:04.636398 4923 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:barbican-db-sync,Image:quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified,Command:[/bin/bash],Args:[-c barbican-manage db upgrade],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/barbican/barbican.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-xr57z,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42403,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42403,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod barbican-db-sync-b867g_openstack(1813822f-07d2-4a68-98bf-26cf5edd6707): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 11:28:04 crc kubenswrapper[4923]: E1128 11:28:04.637875 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/barbican-db-sync-b867g" podUID="1813822f-07d2-4a68-98bf-26cf5edd6707" Nov 28 11:28:04 crc kubenswrapper[4923]: I1128 11:28:04.868447 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-zvrbl" Nov 28 11:28:04 crc kubenswrapper[4923]: I1128 11:28:04.925061 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cbrsc\" (UniqueName: \"kubernetes.io/projected/ab7a5c22-b1d8-49e8-9420-25485e5dabd7-kube-api-access-cbrsc\") pod \"ab7a5c22-b1d8-49e8-9420-25485e5dabd7\" (UID: \"ab7a5c22-b1d8-49e8-9420-25485e5dabd7\") " Nov 28 11:28:04 crc kubenswrapper[4923]: I1128 11:28:04.925111 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab7a5c22-b1d8-49e8-9420-25485e5dabd7-combined-ca-bundle\") pod \"ab7a5c22-b1d8-49e8-9420-25485e5dabd7\" (UID: \"ab7a5c22-b1d8-49e8-9420-25485e5dabd7\") " Nov 28 11:28:04 crc kubenswrapper[4923]: I1128 11:28:04.925166 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/ab7a5c22-b1d8-49e8-9420-25485e5dabd7-config\") pod \"ab7a5c22-b1d8-49e8-9420-25485e5dabd7\" (UID: \"ab7a5c22-b1d8-49e8-9420-25485e5dabd7\") " Nov 28 11:28:04 crc kubenswrapper[4923]: I1128 11:28:04.928788 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ab7a5c22-b1d8-49e8-9420-25485e5dabd7-kube-api-access-cbrsc" (OuterVolumeSpecName: "kube-api-access-cbrsc") pod "ab7a5c22-b1d8-49e8-9420-25485e5dabd7" (UID: "ab7a5c22-b1d8-49e8-9420-25485e5dabd7"). InnerVolumeSpecName "kube-api-access-cbrsc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:28:04 crc kubenswrapper[4923]: I1128 11:28:04.952474 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ab7a5c22-b1d8-49e8-9420-25485e5dabd7-config" (OuterVolumeSpecName: "config") pod "ab7a5c22-b1d8-49e8-9420-25485e5dabd7" (UID: "ab7a5c22-b1d8-49e8-9420-25485e5dabd7"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:28:04 crc kubenswrapper[4923]: I1128 11:28:04.968966 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ab7a5c22-b1d8-49e8-9420-25485e5dabd7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ab7a5c22-b1d8-49e8-9420-25485e5dabd7" (UID: "ab7a5c22-b1d8-49e8-9420-25485e5dabd7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:28:05 crc kubenswrapper[4923]: I1128 11:28:05.027024 4923 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab7a5c22-b1d8-49e8-9420-25485e5dabd7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 11:28:05 crc kubenswrapper[4923]: I1128 11:28:05.027287 4923 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/ab7a5c22-b1d8-49e8-9420-25485e5dabd7-config\") on node \"crc\" DevicePath \"\"" Nov 28 11:28:05 crc kubenswrapper[4923]: I1128 11:28:05.027390 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cbrsc\" (UniqueName: \"kubernetes.io/projected/ab7a5c22-b1d8-49e8-9420-25485e5dabd7-kube-api-access-cbrsc\") on node \"crc\" DevicePath \"\"" Nov 28 11:28:05 crc kubenswrapper[4923]: I1128 11:28:05.040287 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-54f9b7b8d9-d9ssw" Nov 28 11:28:05 crc kubenswrapper[4923]: I1128 11:28:05.096044 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-dfhlh"] Nov 28 11:28:05 crc kubenswrapper[4923]: W1128 11:28:05.099076 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4173396e_acf1_469e_9c63_4a02a2a1692b.slice/crio-c996374e0606ae2190204f1ddd18817dafd652109aa23b034129f0511ea6bb2f WatchSource:0}: Error finding container c996374e0606ae2190204f1ddd18817dafd652109aa23b034129f0511ea6bb2f: Status 404 returned error can't find the container with id c996374e0606ae2190204f1ddd18817dafd652109aa23b034129f0511ea6bb2f Nov 28 11:28:05 crc kubenswrapper[4923]: I1128 11:28:05.128903 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/136fe185-8544-42b9-92df-b0c42d04a4fb-config\") pod \"136fe185-8544-42b9-92df-b0c42d04a4fb\" (UID: \"136fe185-8544-42b9-92df-b0c42d04a4fb\") " Nov 28 11:28:05 crc kubenswrapper[4923]: I1128 11:28:05.129002 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dll2f\" (UniqueName: \"kubernetes.io/projected/136fe185-8544-42b9-92df-b0c42d04a4fb-kube-api-access-dll2f\") pod \"136fe185-8544-42b9-92df-b0c42d04a4fb\" (UID: \"136fe185-8544-42b9-92df-b0c42d04a4fb\") " Nov 28 11:28:05 crc kubenswrapper[4923]: I1128 11:28:05.129354 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/136fe185-8544-42b9-92df-b0c42d04a4fb-dns-svc\") pod \"136fe185-8544-42b9-92df-b0c42d04a4fb\" (UID: \"136fe185-8544-42b9-92df-b0c42d04a4fb\") " Nov 28 11:28:05 crc kubenswrapper[4923]: I1128 11:28:05.129454 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/136fe185-8544-42b9-92df-b0c42d04a4fb-ovsdbserver-nb\") pod \"136fe185-8544-42b9-92df-b0c42d04a4fb\" (UID: \"136fe185-8544-42b9-92df-b0c42d04a4fb\") " Nov 28 11:28:05 crc kubenswrapper[4923]: I1128 11:28:05.129493 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/136fe185-8544-42b9-92df-b0c42d04a4fb-ovsdbserver-sb\") pod \"136fe185-8544-42b9-92df-b0c42d04a4fb\" (UID: \"136fe185-8544-42b9-92df-b0c42d04a4fb\") " Nov 28 11:28:05 crc kubenswrapper[4923]: I1128 11:28:05.140029 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/136fe185-8544-42b9-92df-b0c42d04a4fb-kube-api-access-dll2f" (OuterVolumeSpecName: "kube-api-access-dll2f") pod "136fe185-8544-42b9-92df-b0c42d04a4fb" (UID: "136fe185-8544-42b9-92df-b0c42d04a4fb"). InnerVolumeSpecName "kube-api-access-dll2f". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:28:05 crc kubenswrapper[4923]: I1128 11:28:05.177581 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/136fe185-8544-42b9-92df-b0c42d04a4fb-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "136fe185-8544-42b9-92df-b0c42d04a4fb" (UID: "136fe185-8544-42b9-92df-b0c42d04a4fb"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:28:05 crc kubenswrapper[4923]: I1128 11:28:05.216614 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/136fe185-8544-42b9-92df-b0c42d04a4fb-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "136fe185-8544-42b9-92df-b0c42d04a4fb" (UID: "136fe185-8544-42b9-92df-b0c42d04a4fb"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:28:05 crc kubenswrapper[4923]: I1128 11:28:05.221060 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/136fe185-8544-42b9-92df-b0c42d04a4fb-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "136fe185-8544-42b9-92df-b0c42d04a4fb" (UID: "136fe185-8544-42b9-92df-b0c42d04a4fb"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:28:05 crc kubenswrapper[4923]: I1128 11:28:05.224345 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/136fe185-8544-42b9-92df-b0c42d04a4fb-config" (OuterVolumeSpecName: "config") pod "136fe185-8544-42b9-92df-b0c42d04a4fb" (UID: "136fe185-8544-42b9-92df-b0c42d04a4fb"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:28:05 crc kubenswrapper[4923]: I1128 11:28:05.230890 4923 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/136fe185-8544-42b9-92df-b0c42d04a4fb-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 11:28:05 crc kubenswrapper[4923]: I1128 11:28:05.230915 4923 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/136fe185-8544-42b9-92df-b0c42d04a4fb-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 11:28:05 crc kubenswrapper[4923]: I1128 11:28:05.230926 4923 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/136fe185-8544-42b9-92df-b0c42d04a4fb-config\") on node \"crc\" DevicePath \"\"" Nov 28 11:28:05 crc kubenswrapper[4923]: I1128 11:28:05.230951 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dll2f\" (UniqueName: \"kubernetes.io/projected/136fe185-8544-42b9-92df-b0c42d04a4fb-kube-api-access-dll2f\") on node \"crc\" DevicePath \"\"" Nov 28 11:28:05 crc kubenswrapper[4923]: I1128 11:28:05.230961 4923 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/136fe185-8544-42b9-92df-b0c42d04a4fb-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 11:28:05 crc kubenswrapper[4923]: I1128 11:28:05.383577 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-dfhlh" event={"ID":"4173396e-acf1-469e-9c63-4a02a2a1692b","Type":"ContainerStarted","Data":"bec9a5a63c2d24eff6a5181fa8d4b2bd83d6de38ee6453578c0aedfb3b3adb2a"} Nov 28 11:28:05 crc kubenswrapper[4923]: I1128 11:28:05.383649 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-dfhlh" event={"ID":"4173396e-acf1-469e-9c63-4a02a2a1692b","Type":"ContainerStarted","Data":"c996374e0606ae2190204f1ddd18817dafd652109aa23b034129f0511ea6bb2f"} Nov 28 11:28:05 crc kubenswrapper[4923]: I1128 11:28:05.385453 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-zvrbl" Nov 28 11:28:05 crc kubenswrapper[4923]: I1128 11:28:05.385456 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-zvrbl" event={"ID":"ab7a5c22-b1d8-49e8-9420-25485e5dabd7","Type":"ContainerDied","Data":"9d6b15780970d7af436062dd1ee70d7ff274ed566848b9040ac7340f3decf6ea"} Nov 28 11:28:05 crc kubenswrapper[4923]: I1128 11:28:05.385486 4923 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9d6b15780970d7af436062dd1ee70d7ff274ed566848b9040ac7340f3decf6ea" Nov 28 11:28:05 crc kubenswrapper[4923]: I1128 11:28:05.388964 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5b557d79-c22c-47a7-b460-d65c25c2bca8","Type":"ContainerStarted","Data":"9e974fec2fa94132da5509cd0d195f51c5708539f7a12beba354675cbebf9583"} Nov 28 11:28:05 crc kubenswrapper[4923]: I1128 11:28:05.390521 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-g6z8k" event={"ID":"6803cf35-bd54-4b83-a5b1-42cae252f98d","Type":"ContainerStarted","Data":"12f53f24d35abec23848f3fd1f0a37dcfe9e4e2513abc3bf52ccfbe8e27edf78"} Nov 28 11:28:05 crc kubenswrapper[4923]: I1128 11:28:05.392460 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54f9b7b8d9-d9ssw" event={"ID":"136fe185-8544-42b9-92df-b0c42d04a4fb","Type":"ContainerDied","Data":"5bc72763e16a05cfac23ebfe71828dd0828958a725f2e80d2b299e60024b2e27"} Nov 28 11:28:05 crc kubenswrapper[4923]: I1128 11:28:05.392497 4923 scope.go:117] "RemoveContainer" containerID="7769ff15375ac0ae74c80e592f829f4efcbe158a002c162c498c8ae893bd286e" Nov 28 11:28:05 crc kubenswrapper[4923]: I1128 11:28:05.392507 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-54f9b7b8d9-d9ssw" Nov 28 11:28:05 crc kubenswrapper[4923]: E1128 11:28:05.393725 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified\\\"\"" pod="openstack/barbican-db-sync-b867g" podUID="1813822f-07d2-4a68-98bf-26cf5edd6707" Nov 28 11:28:05 crc kubenswrapper[4923]: I1128 11:28:05.401326 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-dfhlh" podStartSLOduration=12.401307059 podStartE2EDuration="12.401307059s" podCreationTimestamp="2025-11-28 11:27:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:28:05.399916329 +0000 UTC m=+1164.528600549" watchObservedRunningTime="2025-11-28 11:28:05.401307059 +0000 UTC m=+1164.529991269" Nov 28 11:28:05 crc kubenswrapper[4923]: I1128 11:28:05.419385 4923 scope.go:117] "RemoveContainer" containerID="0734859dc5bd26d8f5de06618e946591cec15cb880f123aff332d122a5a500dc" Nov 28 11:28:05 crc kubenswrapper[4923]: I1128 11:28:05.462633 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-g6z8k" podStartSLOduration=2.808008989 podStartE2EDuration="24.462614295s" podCreationTimestamp="2025-11-28 11:27:41 +0000 UTC" firstStartedPulling="2025-11-28 11:27:42.962541633 +0000 UTC m=+1142.091225843" lastFinishedPulling="2025-11-28 11:28:04.617146909 +0000 UTC m=+1163.745831149" observedRunningTime="2025-11-28 11:28:05.442224478 +0000 UTC m=+1164.570908688" watchObservedRunningTime="2025-11-28 11:28:05.462614295 +0000 UTC m=+1164.591298515" Nov 28 11:28:05 crc kubenswrapper[4923]: I1128 11:28:05.472182 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-54f9b7b8d9-d9ssw"] Nov 28 11:28:05 crc kubenswrapper[4923]: I1128 11:28:05.478402 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-54f9b7b8d9-d9ssw"] Nov 28 11:28:06 crc kubenswrapper[4923]: I1128 11:28:06.013233 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7b946d459c-b9ggp"] Nov 28 11:28:06 crc kubenswrapper[4923]: E1128 11:28:06.014014 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="136fe185-8544-42b9-92df-b0c42d04a4fb" containerName="init" Nov 28 11:28:06 crc kubenswrapper[4923]: I1128 11:28:06.014028 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="136fe185-8544-42b9-92df-b0c42d04a4fb" containerName="init" Nov 28 11:28:06 crc kubenswrapper[4923]: E1128 11:28:06.014039 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ab7a5c22-b1d8-49e8-9420-25485e5dabd7" containerName="neutron-db-sync" Nov 28 11:28:06 crc kubenswrapper[4923]: I1128 11:28:06.014045 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="ab7a5c22-b1d8-49e8-9420-25485e5dabd7" containerName="neutron-db-sync" Nov 28 11:28:06 crc kubenswrapper[4923]: E1128 11:28:06.014059 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="136fe185-8544-42b9-92df-b0c42d04a4fb" containerName="dnsmasq-dns" Nov 28 11:28:06 crc kubenswrapper[4923]: I1128 11:28:06.014073 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="136fe185-8544-42b9-92df-b0c42d04a4fb" containerName="dnsmasq-dns" Nov 28 11:28:06 crc kubenswrapper[4923]: I1128 11:28:06.014247 4923 
Nov 28 11:28:06 crc kubenswrapper[4923]: I1128 11:28:06.014247 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="136fe185-8544-42b9-92df-b0c42d04a4fb" containerName="dnsmasq-dns"
Nov 28 11:28:06 crc kubenswrapper[4923]: I1128 11:28:06.014264 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="ab7a5c22-b1d8-49e8-9420-25485e5dabd7" containerName="neutron-db-sync"
Nov 28 11:28:06 crc kubenswrapper[4923]: I1128 11:28:06.015163 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7b946d459c-b9ggp"
Nov 28 11:28:06 crc kubenswrapper[4923]: I1128 11:28:06.041799 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7b946d459c-b9ggp"]
Nov 28 11:28:06 crc kubenswrapper[4923]: I1128 11:28:06.147798 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/73b329c5-451d-4df5-9c61-da9706ee5d52-ovsdbserver-sb\") pod \"dnsmasq-dns-7b946d459c-b9ggp\" (UID: \"73b329c5-451d-4df5-9c61-da9706ee5d52\") " pod="openstack/dnsmasq-dns-7b946d459c-b9ggp"
Nov 28 11:28:06 crc kubenswrapper[4923]: I1128 11:28:06.147841 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/73b329c5-451d-4df5-9c61-da9706ee5d52-dns-svc\") pod \"dnsmasq-dns-7b946d459c-b9ggp\" (UID: \"73b329c5-451d-4df5-9c61-da9706ee5d52\") " pod="openstack/dnsmasq-dns-7b946d459c-b9ggp"
Nov 28 11:28:06 crc kubenswrapper[4923]: I1128 11:28:06.147885 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/73b329c5-451d-4df5-9c61-da9706ee5d52-ovsdbserver-nb\") pod \"dnsmasq-dns-7b946d459c-b9ggp\" (UID: \"73b329c5-451d-4df5-9c61-da9706ee5d52\") " pod="openstack/dnsmasq-dns-7b946d459c-b9ggp"
Nov 28 11:28:06 crc kubenswrapper[4923]: I1128 11:28:06.148020 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/73b329c5-451d-4df5-9c61-da9706ee5d52-config\") pod \"dnsmasq-dns-7b946d459c-b9ggp\" (UID: \"73b329c5-451d-4df5-9c61-da9706ee5d52\") " pod="openstack/dnsmasq-dns-7b946d459c-b9ggp"
Nov 28 11:28:06 crc kubenswrapper[4923]: I1128 11:28:06.148218 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rgxhd\" (UniqueName: \"kubernetes.io/projected/73b329c5-451d-4df5-9c61-da9706ee5d52-kube-api-access-rgxhd\") pod \"dnsmasq-dns-7b946d459c-b9ggp\" (UID: \"73b329c5-451d-4df5-9c61-da9706ee5d52\") " pod="openstack/dnsmasq-dns-7b946d459c-b9ggp"
Nov 28 11:28:06 crc kubenswrapper[4923]: I1128 11:28:06.184616 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-57876647b-g86pl"]
Nov 28 11:28:06 crc kubenswrapper[4923]: I1128 11:28:06.185876 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-57876647b-g86pl" Nov 28 11:28:06 crc kubenswrapper[4923]: I1128 11:28:06.187723 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Nov 28 11:28:06 crc kubenswrapper[4923]: I1128 11:28:06.188115 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-275ps" Nov 28 11:28:06 crc kubenswrapper[4923]: I1128 11:28:06.188248 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Nov 28 11:28:06 crc kubenswrapper[4923]: I1128 11:28:06.188366 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs" Nov 28 11:28:06 crc kubenswrapper[4923]: I1128 11:28:06.212100 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-57876647b-g86pl"] Nov 28 11:28:06 crc kubenswrapper[4923]: I1128 11:28:06.249850 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hs6hc\" (UniqueName: \"kubernetes.io/projected/322dfb9b-c79f-4d58-96eb-265da89196f4-kube-api-access-hs6hc\") pod \"neutron-57876647b-g86pl\" (UID: \"322dfb9b-c79f-4d58-96eb-265da89196f4\") " pod="openstack/neutron-57876647b-g86pl" Nov 28 11:28:06 crc kubenswrapper[4923]: I1128 11:28:06.249987 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/322dfb9b-c79f-4d58-96eb-265da89196f4-httpd-config\") pod \"neutron-57876647b-g86pl\" (UID: \"322dfb9b-c79f-4d58-96eb-265da89196f4\") " pod="openstack/neutron-57876647b-g86pl" Nov 28 11:28:06 crc kubenswrapper[4923]: I1128 11:28:06.250015 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rgxhd\" (UniqueName: \"kubernetes.io/projected/73b329c5-451d-4df5-9c61-da9706ee5d52-kube-api-access-rgxhd\") pod \"dnsmasq-dns-7b946d459c-b9ggp\" (UID: \"73b329c5-451d-4df5-9c61-da9706ee5d52\") " pod="openstack/dnsmasq-dns-7b946d459c-b9ggp" Nov 28 11:28:06 crc kubenswrapper[4923]: I1128 11:28:06.250058 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/322dfb9b-c79f-4d58-96eb-265da89196f4-combined-ca-bundle\") pod \"neutron-57876647b-g86pl\" (UID: \"322dfb9b-c79f-4d58-96eb-265da89196f4\") " pod="openstack/neutron-57876647b-g86pl" Nov 28 11:28:06 crc kubenswrapper[4923]: I1128 11:28:06.250074 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/322dfb9b-c79f-4d58-96eb-265da89196f4-ovndb-tls-certs\") pod \"neutron-57876647b-g86pl\" (UID: \"322dfb9b-c79f-4d58-96eb-265da89196f4\") " pod="openstack/neutron-57876647b-g86pl" Nov 28 11:28:06 crc kubenswrapper[4923]: I1128 11:28:06.250134 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/322dfb9b-c79f-4d58-96eb-265da89196f4-config\") pod \"neutron-57876647b-g86pl\" (UID: \"322dfb9b-c79f-4d58-96eb-265da89196f4\") " pod="openstack/neutron-57876647b-g86pl" Nov 28 11:28:06 crc kubenswrapper[4923]: I1128 11:28:06.250169 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/73b329c5-451d-4df5-9c61-da9706ee5d52-ovsdbserver-sb\") pod 
\"dnsmasq-dns-7b946d459c-b9ggp\" (UID: \"73b329c5-451d-4df5-9c61-da9706ee5d52\") " pod="openstack/dnsmasq-dns-7b946d459c-b9ggp" Nov 28 11:28:06 crc kubenswrapper[4923]: I1128 11:28:06.250189 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/73b329c5-451d-4df5-9c61-da9706ee5d52-dns-svc\") pod \"dnsmasq-dns-7b946d459c-b9ggp\" (UID: \"73b329c5-451d-4df5-9c61-da9706ee5d52\") " pod="openstack/dnsmasq-dns-7b946d459c-b9ggp" Nov 28 11:28:06 crc kubenswrapper[4923]: I1128 11:28:06.250245 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/73b329c5-451d-4df5-9c61-da9706ee5d52-ovsdbserver-nb\") pod \"dnsmasq-dns-7b946d459c-b9ggp\" (UID: \"73b329c5-451d-4df5-9c61-da9706ee5d52\") " pod="openstack/dnsmasq-dns-7b946d459c-b9ggp" Nov 28 11:28:06 crc kubenswrapper[4923]: I1128 11:28:06.250264 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/73b329c5-451d-4df5-9c61-da9706ee5d52-config\") pod \"dnsmasq-dns-7b946d459c-b9ggp\" (UID: \"73b329c5-451d-4df5-9c61-da9706ee5d52\") " pod="openstack/dnsmasq-dns-7b946d459c-b9ggp" Nov 28 11:28:06 crc kubenswrapper[4923]: I1128 11:28:06.251949 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/73b329c5-451d-4df5-9c61-da9706ee5d52-ovsdbserver-nb\") pod \"dnsmasq-dns-7b946d459c-b9ggp\" (UID: \"73b329c5-451d-4df5-9c61-da9706ee5d52\") " pod="openstack/dnsmasq-dns-7b946d459c-b9ggp" Nov 28 11:28:06 crc kubenswrapper[4923]: I1128 11:28:06.252141 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/73b329c5-451d-4df5-9c61-da9706ee5d52-dns-svc\") pod \"dnsmasq-dns-7b946d459c-b9ggp\" (UID: \"73b329c5-451d-4df5-9c61-da9706ee5d52\") " pod="openstack/dnsmasq-dns-7b946d459c-b9ggp" Nov 28 11:28:06 crc kubenswrapper[4923]: I1128 11:28:06.252573 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/73b329c5-451d-4df5-9c61-da9706ee5d52-ovsdbserver-sb\") pod \"dnsmasq-dns-7b946d459c-b9ggp\" (UID: \"73b329c5-451d-4df5-9c61-da9706ee5d52\") " pod="openstack/dnsmasq-dns-7b946d459c-b9ggp" Nov 28 11:28:06 crc kubenswrapper[4923]: I1128 11:28:06.252728 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/73b329c5-451d-4df5-9c61-da9706ee5d52-config\") pod \"dnsmasq-dns-7b946d459c-b9ggp\" (UID: \"73b329c5-451d-4df5-9c61-da9706ee5d52\") " pod="openstack/dnsmasq-dns-7b946d459c-b9ggp" Nov 28 11:28:06 crc kubenswrapper[4923]: I1128 11:28:06.308401 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rgxhd\" (UniqueName: \"kubernetes.io/projected/73b329c5-451d-4df5-9c61-da9706ee5d52-kube-api-access-rgxhd\") pod \"dnsmasq-dns-7b946d459c-b9ggp\" (UID: \"73b329c5-451d-4df5-9c61-da9706ee5d52\") " pod="openstack/dnsmasq-dns-7b946d459c-b9ggp" Nov 28 11:28:06 crc kubenswrapper[4923]: I1128 11:28:06.339747 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7b946d459c-b9ggp" Nov 28 11:28:06 crc kubenswrapper[4923]: I1128 11:28:06.352278 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hs6hc\" (UniqueName: \"kubernetes.io/projected/322dfb9b-c79f-4d58-96eb-265da89196f4-kube-api-access-hs6hc\") pod \"neutron-57876647b-g86pl\" (UID: \"322dfb9b-c79f-4d58-96eb-265da89196f4\") " pod="openstack/neutron-57876647b-g86pl" Nov 28 11:28:06 crc kubenswrapper[4923]: I1128 11:28:06.352333 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/322dfb9b-c79f-4d58-96eb-265da89196f4-httpd-config\") pod \"neutron-57876647b-g86pl\" (UID: \"322dfb9b-c79f-4d58-96eb-265da89196f4\") " pod="openstack/neutron-57876647b-g86pl" Nov 28 11:28:06 crc kubenswrapper[4923]: I1128 11:28:06.352370 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/322dfb9b-c79f-4d58-96eb-265da89196f4-combined-ca-bundle\") pod \"neutron-57876647b-g86pl\" (UID: \"322dfb9b-c79f-4d58-96eb-265da89196f4\") " pod="openstack/neutron-57876647b-g86pl" Nov 28 11:28:06 crc kubenswrapper[4923]: I1128 11:28:06.352385 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/322dfb9b-c79f-4d58-96eb-265da89196f4-ovndb-tls-certs\") pod \"neutron-57876647b-g86pl\" (UID: \"322dfb9b-c79f-4d58-96eb-265da89196f4\") " pod="openstack/neutron-57876647b-g86pl" Nov 28 11:28:06 crc kubenswrapper[4923]: I1128 11:28:06.352439 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/322dfb9b-c79f-4d58-96eb-265da89196f4-config\") pod \"neutron-57876647b-g86pl\" (UID: \"322dfb9b-c79f-4d58-96eb-265da89196f4\") " pod="openstack/neutron-57876647b-g86pl" Nov 28 11:28:06 crc kubenswrapper[4923]: I1128 11:28:06.357559 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/322dfb9b-c79f-4d58-96eb-265da89196f4-combined-ca-bundle\") pod \"neutron-57876647b-g86pl\" (UID: \"322dfb9b-c79f-4d58-96eb-265da89196f4\") " pod="openstack/neutron-57876647b-g86pl" Nov 28 11:28:06 crc kubenswrapper[4923]: I1128 11:28:06.358527 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/322dfb9b-c79f-4d58-96eb-265da89196f4-config\") pod \"neutron-57876647b-g86pl\" (UID: \"322dfb9b-c79f-4d58-96eb-265da89196f4\") " pod="openstack/neutron-57876647b-g86pl" Nov 28 11:28:06 crc kubenswrapper[4923]: I1128 11:28:06.360309 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/322dfb9b-c79f-4d58-96eb-265da89196f4-ovndb-tls-certs\") pod \"neutron-57876647b-g86pl\" (UID: \"322dfb9b-c79f-4d58-96eb-265da89196f4\") " pod="openstack/neutron-57876647b-g86pl" Nov 28 11:28:06 crc kubenswrapper[4923]: I1128 11:28:06.378304 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hs6hc\" (UniqueName: \"kubernetes.io/projected/322dfb9b-c79f-4d58-96eb-265da89196f4-kube-api-access-hs6hc\") pod \"neutron-57876647b-g86pl\" (UID: \"322dfb9b-c79f-4d58-96eb-265da89196f4\") " pod="openstack/neutron-57876647b-g86pl" Nov 28 11:28:06 crc kubenswrapper[4923]: I1128 11:28:06.381664 4923 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/322dfb9b-c79f-4d58-96eb-265da89196f4-httpd-config\") pod \"neutron-57876647b-g86pl\" (UID: \"322dfb9b-c79f-4d58-96eb-265da89196f4\") " pod="openstack/neutron-57876647b-g86pl" Nov 28 11:28:06 crc kubenswrapper[4923]: I1128 11:28:06.518782 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-57876647b-g86pl" Nov 28 11:28:06 crc kubenswrapper[4923]: I1128 11:28:06.827040 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7b946d459c-b9ggp"] Nov 28 11:28:07 crc kubenswrapper[4923]: I1128 11:28:07.187472 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="136fe185-8544-42b9-92df-b0c42d04a4fb" path="/var/lib/kubelet/pods/136fe185-8544-42b9-92df-b0c42d04a4fb/volumes" Nov 28 11:28:07 crc kubenswrapper[4923]: I1128 11:28:07.427075 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7b946d459c-b9ggp" event={"ID":"73b329c5-451d-4df5-9c61-da9706ee5d52","Type":"ContainerStarted","Data":"2ea29aecce4858eb4c0f0f5c84cb5f126298e8591683f22100c16ba87d1f8055"} Nov 28 11:28:07 crc kubenswrapper[4923]: I1128 11:28:07.495981 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-57876647b-g86pl"] Nov 28 11:28:07 crc kubenswrapper[4923]: W1128 11:28:07.508410 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod322dfb9b_c79f_4d58_96eb_265da89196f4.slice/crio-acdd6a8cd5329c7edb3150653fd5d12972f4ed6f830cde249bc85955ce13756e WatchSource:0}: Error finding container acdd6a8cd5329c7edb3150653fd5d12972f4ed6f830cde249bc85955ce13756e: Status 404 returned error can't find the container with id acdd6a8cd5329c7edb3150653fd5d12972f4ed6f830cde249bc85955ce13756e Nov 28 11:28:08 crc kubenswrapper[4923]: I1128 11:28:08.440106 4923 generic.go:334] "Generic (PLEG): container finished" podID="73b329c5-451d-4df5-9c61-da9706ee5d52" containerID="41a1b39660767d19c4d056837548439d65006a5a6750c72bb01e969231d2b3fe" exitCode=0 Nov 28 11:28:08 crc kubenswrapper[4923]: I1128 11:28:08.440969 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7b946d459c-b9ggp" event={"ID":"73b329c5-451d-4df5-9c61-da9706ee5d52","Type":"ContainerDied","Data":"41a1b39660767d19c4d056837548439d65006a5a6750c72bb01e969231d2b3fe"} Nov 28 11:28:08 crc kubenswrapper[4923]: I1128 11:28:08.447582 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-57876647b-g86pl" event={"ID":"322dfb9b-c79f-4d58-96eb-265da89196f4","Type":"ContainerStarted","Data":"18134142dd802445c28c83de513c0e68371687ade17d40fc5952f5934c47d926"} Nov 28 11:28:08 crc kubenswrapper[4923]: I1128 11:28:08.447609 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-57876647b-g86pl" event={"ID":"322dfb9b-c79f-4d58-96eb-265da89196f4","Type":"ContainerStarted","Data":"4c349486c4d323a0c1525ced697c1addeeda2efe88a583d486bc07c71d95db8a"} Nov 28 11:28:08 crc kubenswrapper[4923]: I1128 11:28:08.447618 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-57876647b-g86pl" event={"ID":"322dfb9b-c79f-4d58-96eb-265da89196f4","Type":"ContainerStarted","Data":"acdd6a8cd5329c7edb3150653fd5d12972f4ed6f830cde249bc85955ce13756e"} Nov 28 11:28:08 crc kubenswrapper[4923]: I1128 11:28:08.448292 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-57876647b-g86pl" Nov 28 11:28:08 crc 
kubenswrapper[4923]: I1128 11:28:08.449673 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5b557d79-c22c-47a7-b460-d65c25c2bca8","Type":"ContainerStarted","Data":"5dab8389a161c430125def1dacd522708b542950beb371ccc60924982d792314"} Nov 28 11:28:08 crc kubenswrapper[4923]: I1128 11:28:08.451243 4923 generic.go:334] "Generic (PLEG): container finished" podID="6803cf35-bd54-4b83-a5b1-42cae252f98d" containerID="12f53f24d35abec23848f3fd1f0a37dcfe9e4e2513abc3bf52ccfbe8e27edf78" exitCode=0 Nov 28 11:28:08 crc kubenswrapper[4923]: I1128 11:28:08.451268 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-g6z8k" event={"ID":"6803cf35-bd54-4b83-a5b1-42cae252f98d","Type":"ContainerDied","Data":"12f53f24d35abec23848f3fd1f0a37dcfe9e4e2513abc3bf52ccfbe8e27edf78"} Nov 28 11:28:08 crc kubenswrapper[4923]: I1128 11:28:08.483297 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-57876647b-g86pl" podStartSLOduration=2.483283202 podStartE2EDuration="2.483283202s" podCreationTimestamp="2025-11-28 11:28:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:28:08.478467416 +0000 UTC m=+1167.607151616" watchObservedRunningTime="2025-11-28 11:28:08.483283202 +0000 UTC m=+1167.611967412" Nov 28 11:28:09 crc kubenswrapper[4923]: I1128 11:28:09.015862 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-5fbc45745-sfgcl"] Nov 28 11:28:09 crc kubenswrapper[4923]: I1128 11:28:09.017402 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-5fbc45745-sfgcl" Nov 28 11:28:09 crc kubenswrapper[4923]: I1128 11:28:09.024274 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc" Nov 28 11:28:09 crc kubenswrapper[4923]: I1128 11:28:09.024426 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc" Nov 28 11:28:09 crc kubenswrapper[4923]: I1128 11:28:09.026323 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-5fbc45745-sfgcl"] Nov 28 11:28:09 crc kubenswrapper[4923]: I1128 11:28:09.105735 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/71e4dc03-02ac-4443-ad2d-9d47f3f1457b-httpd-config\") pod \"neutron-5fbc45745-sfgcl\" (UID: \"71e4dc03-02ac-4443-ad2d-9d47f3f1457b\") " pod="openstack/neutron-5fbc45745-sfgcl" Nov 28 11:28:09 crc kubenswrapper[4923]: I1128 11:28:09.105799 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71e4dc03-02ac-4443-ad2d-9d47f3f1457b-combined-ca-bundle\") pod \"neutron-5fbc45745-sfgcl\" (UID: \"71e4dc03-02ac-4443-ad2d-9d47f3f1457b\") " pod="openstack/neutron-5fbc45745-sfgcl" Nov 28 11:28:09 crc kubenswrapper[4923]: I1128 11:28:09.105909 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/71e4dc03-02ac-4443-ad2d-9d47f3f1457b-internal-tls-certs\") pod \"neutron-5fbc45745-sfgcl\" (UID: \"71e4dc03-02ac-4443-ad2d-9d47f3f1457b\") " pod="openstack/neutron-5fbc45745-sfgcl" Nov 28 11:28:09 crc kubenswrapper[4923]: I1128 11:28:09.106067 4923 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/71e4dc03-02ac-4443-ad2d-9d47f3f1457b-ovndb-tls-certs\") pod \"neutron-5fbc45745-sfgcl\" (UID: \"71e4dc03-02ac-4443-ad2d-9d47f3f1457b\") " pod="openstack/neutron-5fbc45745-sfgcl" Nov 28 11:28:09 crc kubenswrapper[4923]: I1128 11:28:09.106238 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/71e4dc03-02ac-4443-ad2d-9d47f3f1457b-config\") pod \"neutron-5fbc45745-sfgcl\" (UID: \"71e4dc03-02ac-4443-ad2d-9d47f3f1457b\") " pod="openstack/neutron-5fbc45745-sfgcl" Nov 28 11:28:09 crc kubenswrapper[4923]: I1128 11:28:09.106392 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zm5fn\" (UniqueName: \"kubernetes.io/projected/71e4dc03-02ac-4443-ad2d-9d47f3f1457b-kube-api-access-zm5fn\") pod \"neutron-5fbc45745-sfgcl\" (UID: \"71e4dc03-02ac-4443-ad2d-9d47f3f1457b\") " pod="openstack/neutron-5fbc45745-sfgcl" Nov 28 11:28:09 crc kubenswrapper[4923]: I1128 11:28:09.106437 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/71e4dc03-02ac-4443-ad2d-9d47f3f1457b-public-tls-certs\") pod \"neutron-5fbc45745-sfgcl\" (UID: \"71e4dc03-02ac-4443-ad2d-9d47f3f1457b\") " pod="openstack/neutron-5fbc45745-sfgcl" Nov 28 11:28:09 crc kubenswrapper[4923]: I1128 11:28:09.207813 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/71e4dc03-02ac-4443-ad2d-9d47f3f1457b-config\") pod \"neutron-5fbc45745-sfgcl\" (UID: \"71e4dc03-02ac-4443-ad2d-9d47f3f1457b\") " pod="openstack/neutron-5fbc45745-sfgcl" Nov 28 11:28:09 crc kubenswrapper[4923]: I1128 11:28:09.207887 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zm5fn\" (UniqueName: \"kubernetes.io/projected/71e4dc03-02ac-4443-ad2d-9d47f3f1457b-kube-api-access-zm5fn\") pod \"neutron-5fbc45745-sfgcl\" (UID: \"71e4dc03-02ac-4443-ad2d-9d47f3f1457b\") " pod="openstack/neutron-5fbc45745-sfgcl" Nov 28 11:28:09 crc kubenswrapper[4923]: I1128 11:28:09.207918 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/71e4dc03-02ac-4443-ad2d-9d47f3f1457b-public-tls-certs\") pod \"neutron-5fbc45745-sfgcl\" (UID: \"71e4dc03-02ac-4443-ad2d-9d47f3f1457b\") " pod="openstack/neutron-5fbc45745-sfgcl" Nov 28 11:28:09 crc kubenswrapper[4923]: I1128 11:28:09.207961 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/71e4dc03-02ac-4443-ad2d-9d47f3f1457b-httpd-config\") pod \"neutron-5fbc45745-sfgcl\" (UID: \"71e4dc03-02ac-4443-ad2d-9d47f3f1457b\") " pod="openstack/neutron-5fbc45745-sfgcl" Nov 28 11:28:09 crc kubenswrapper[4923]: I1128 11:28:09.207997 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71e4dc03-02ac-4443-ad2d-9d47f3f1457b-combined-ca-bundle\") pod \"neutron-5fbc45745-sfgcl\" (UID: \"71e4dc03-02ac-4443-ad2d-9d47f3f1457b\") " pod="openstack/neutron-5fbc45745-sfgcl" Nov 28 11:28:09 crc kubenswrapper[4923]: I1128 11:28:09.208018 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" 
(UniqueName: \"kubernetes.io/secret/71e4dc03-02ac-4443-ad2d-9d47f3f1457b-internal-tls-certs\") pod \"neutron-5fbc45745-sfgcl\" (UID: \"71e4dc03-02ac-4443-ad2d-9d47f3f1457b\") " pod="openstack/neutron-5fbc45745-sfgcl" Nov 28 11:28:09 crc kubenswrapper[4923]: I1128 11:28:09.208050 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/71e4dc03-02ac-4443-ad2d-9d47f3f1457b-ovndb-tls-certs\") pod \"neutron-5fbc45745-sfgcl\" (UID: \"71e4dc03-02ac-4443-ad2d-9d47f3f1457b\") " pod="openstack/neutron-5fbc45745-sfgcl" Nov 28 11:28:09 crc kubenswrapper[4923]: I1128 11:28:09.214056 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/71e4dc03-02ac-4443-ad2d-9d47f3f1457b-public-tls-certs\") pod \"neutron-5fbc45745-sfgcl\" (UID: \"71e4dc03-02ac-4443-ad2d-9d47f3f1457b\") " pod="openstack/neutron-5fbc45745-sfgcl" Nov 28 11:28:09 crc kubenswrapper[4923]: I1128 11:28:09.214297 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71e4dc03-02ac-4443-ad2d-9d47f3f1457b-combined-ca-bundle\") pod \"neutron-5fbc45745-sfgcl\" (UID: \"71e4dc03-02ac-4443-ad2d-9d47f3f1457b\") " pod="openstack/neutron-5fbc45745-sfgcl" Nov 28 11:28:09 crc kubenswrapper[4923]: I1128 11:28:09.215301 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/71e4dc03-02ac-4443-ad2d-9d47f3f1457b-httpd-config\") pod \"neutron-5fbc45745-sfgcl\" (UID: \"71e4dc03-02ac-4443-ad2d-9d47f3f1457b\") " pod="openstack/neutron-5fbc45745-sfgcl" Nov 28 11:28:09 crc kubenswrapper[4923]: I1128 11:28:09.216056 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/71e4dc03-02ac-4443-ad2d-9d47f3f1457b-config\") pod \"neutron-5fbc45745-sfgcl\" (UID: \"71e4dc03-02ac-4443-ad2d-9d47f3f1457b\") " pod="openstack/neutron-5fbc45745-sfgcl" Nov 28 11:28:09 crc kubenswrapper[4923]: I1128 11:28:09.219199 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/71e4dc03-02ac-4443-ad2d-9d47f3f1457b-ovndb-tls-certs\") pod \"neutron-5fbc45745-sfgcl\" (UID: \"71e4dc03-02ac-4443-ad2d-9d47f3f1457b\") " pod="openstack/neutron-5fbc45745-sfgcl" Nov 28 11:28:09 crc kubenswrapper[4923]: I1128 11:28:09.230249 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/71e4dc03-02ac-4443-ad2d-9d47f3f1457b-internal-tls-certs\") pod \"neutron-5fbc45745-sfgcl\" (UID: \"71e4dc03-02ac-4443-ad2d-9d47f3f1457b\") " pod="openstack/neutron-5fbc45745-sfgcl" Nov 28 11:28:09 crc kubenswrapper[4923]: I1128 11:28:09.233586 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zm5fn\" (UniqueName: \"kubernetes.io/projected/71e4dc03-02ac-4443-ad2d-9d47f3f1457b-kube-api-access-zm5fn\") pod \"neutron-5fbc45745-sfgcl\" (UID: \"71e4dc03-02ac-4443-ad2d-9d47f3f1457b\") " pod="openstack/neutron-5fbc45745-sfgcl" Nov 28 11:28:09 crc kubenswrapper[4923]: I1128 11:28:09.369348 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-5fbc45745-sfgcl" Nov 28 11:28:09 crc kubenswrapper[4923]: I1128 11:28:09.464537 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7b946d459c-b9ggp" event={"ID":"73b329c5-451d-4df5-9c61-da9706ee5d52","Type":"ContainerStarted","Data":"5a0cebbe8280150d56107e83737cca6ba9f9fd5ba02b0fb12f54bc8903de7217"} Nov 28 11:28:09 crc kubenswrapper[4923]: I1128 11:28:09.504726 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7b946d459c-b9ggp" podStartSLOduration=4.504708903 podStartE2EDuration="4.504708903s" podCreationTimestamp="2025-11-28 11:28:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:28:09.483201341 +0000 UTC m=+1168.611885561" watchObservedRunningTime="2025-11-28 11:28:09.504708903 +0000 UTC m=+1168.633393113" Nov 28 11:28:09 crc kubenswrapper[4923]: I1128 11:28:09.701120 4923 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-54f9b7b8d9-d9ssw" podUID="136fe185-8544-42b9-92df-b0c42d04a4fb" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.127:5353: i/o timeout" Nov 28 11:28:10 crc kubenswrapper[4923]: I1128 11:28:09.878583 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-g6z8k" Nov 28 11:28:10 crc kubenswrapper[4923]: I1128 11:28:09.922503 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6803cf35-bd54-4b83-a5b1-42cae252f98d-logs\") pod \"6803cf35-bd54-4b83-a5b1-42cae252f98d\" (UID: \"6803cf35-bd54-4b83-a5b1-42cae252f98d\") " Nov 28 11:28:10 crc kubenswrapper[4923]: I1128 11:28:09.922575 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ktjcf\" (UniqueName: \"kubernetes.io/projected/6803cf35-bd54-4b83-a5b1-42cae252f98d-kube-api-access-ktjcf\") pod \"6803cf35-bd54-4b83-a5b1-42cae252f98d\" (UID: \"6803cf35-bd54-4b83-a5b1-42cae252f98d\") " Nov 28 11:28:10 crc kubenswrapper[4923]: I1128 11:28:09.922644 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6803cf35-bd54-4b83-a5b1-42cae252f98d-combined-ca-bundle\") pod \"6803cf35-bd54-4b83-a5b1-42cae252f98d\" (UID: \"6803cf35-bd54-4b83-a5b1-42cae252f98d\") " Nov 28 11:28:10 crc kubenswrapper[4923]: I1128 11:28:09.922669 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6803cf35-bd54-4b83-a5b1-42cae252f98d-config-data\") pod \"6803cf35-bd54-4b83-a5b1-42cae252f98d\" (UID: \"6803cf35-bd54-4b83-a5b1-42cae252f98d\") " Nov 28 11:28:10 crc kubenswrapper[4923]: I1128 11:28:09.922715 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6803cf35-bd54-4b83-a5b1-42cae252f98d-scripts\") pod \"6803cf35-bd54-4b83-a5b1-42cae252f98d\" (UID: \"6803cf35-bd54-4b83-a5b1-42cae252f98d\") " Nov 28 11:28:10 crc kubenswrapper[4923]: I1128 11:28:09.922952 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6803cf35-bd54-4b83-a5b1-42cae252f98d-logs" (OuterVolumeSpecName: "logs") pod "6803cf35-bd54-4b83-a5b1-42cae252f98d" (UID: "6803cf35-bd54-4b83-a5b1-42cae252f98d"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:28:10 crc kubenswrapper[4923]: I1128 11:28:09.923031 4923 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6803cf35-bd54-4b83-a5b1-42cae252f98d-logs\") on node \"crc\" DevicePath \"\"" Nov 28 11:28:10 crc kubenswrapper[4923]: I1128 11:28:09.931584 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6803cf35-bd54-4b83-a5b1-42cae252f98d-scripts" (OuterVolumeSpecName: "scripts") pod "6803cf35-bd54-4b83-a5b1-42cae252f98d" (UID: "6803cf35-bd54-4b83-a5b1-42cae252f98d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:28:10 crc kubenswrapper[4923]: I1128 11:28:09.938571 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6803cf35-bd54-4b83-a5b1-42cae252f98d-kube-api-access-ktjcf" (OuterVolumeSpecName: "kube-api-access-ktjcf") pod "6803cf35-bd54-4b83-a5b1-42cae252f98d" (UID: "6803cf35-bd54-4b83-a5b1-42cae252f98d"). InnerVolumeSpecName "kube-api-access-ktjcf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:28:10 crc kubenswrapper[4923]: I1128 11:28:09.977544 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6803cf35-bd54-4b83-a5b1-42cae252f98d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6803cf35-bd54-4b83-a5b1-42cae252f98d" (UID: "6803cf35-bd54-4b83-a5b1-42cae252f98d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:28:10 crc kubenswrapper[4923]: I1128 11:28:09.994183 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6803cf35-bd54-4b83-a5b1-42cae252f98d-config-data" (OuterVolumeSpecName: "config-data") pod "6803cf35-bd54-4b83-a5b1-42cae252f98d" (UID: "6803cf35-bd54-4b83-a5b1-42cae252f98d"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:28:10 crc kubenswrapper[4923]: I1128 11:28:10.023985 4923 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6803cf35-bd54-4b83-a5b1-42cae252f98d-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 11:28:10 crc kubenswrapper[4923]: I1128 11:28:10.024009 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ktjcf\" (UniqueName: \"kubernetes.io/projected/6803cf35-bd54-4b83-a5b1-42cae252f98d-kube-api-access-ktjcf\") on node \"crc\" DevicePath \"\"" Nov 28 11:28:10 crc kubenswrapper[4923]: I1128 11:28:10.024020 4923 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6803cf35-bd54-4b83-a5b1-42cae252f98d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 11:28:10 crc kubenswrapper[4923]: I1128 11:28:10.024028 4923 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6803cf35-bd54-4b83-a5b1-42cae252f98d-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 11:28:10 crc kubenswrapper[4923]: I1128 11:28:10.121925 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-5fbc45745-sfgcl"] Nov 28 11:28:10 crc kubenswrapper[4923]: I1128 11:28:10.481569 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-g6z8k" event={"ID":"6803cf35-bd54-4b83-a5b1-42cae252f98d","Type":"ContainerDied","Data":"0afa88dba5db741d871387adfe50f94848c6ebaff0b7f04d22133683833b5744"} Nov 28 11:28:10 crc kubenswrapper[4923]: I1128 11:28:10.481958 4923 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0afa88dba5db741d871387adfe50f94848c6ebaff0b7f04d22133683833b5744" Nov 28 11:28:10 crc kubenswrapper[4923]: I1128 11:28:10.481681 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-g6z8k" Nov 28 11:28:10 crc kubenswrapper[4923]: I1128 11:28:10.489718 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5fbc45745-sfgcl" event={"ID":"71e4dc03-02ac-4443-ad2d-9d47f3f1457b","Type":"ContainerStarted","Data":"ec305071c9283a36d5f8e5b0a68786b1820307aaa28020abee294dd0f0388b47"} Nov 28 11:28:10 crc kubenswrapper[4923]: I1128 11:28:10.498969 4923 generic.go:334] "Generic (PLEG): container finished" podID="4173396e-acf1-469e-9c63-4a02a2a1692b" containerID="bec9a5a63c2d24eff6a5181fa8d4b2bd83d6de38ee6453578c0aedfb3b3adb2a" exitCode=0 Nov 28 11:28:10 crc kubenswrapper[4923]: I1128 11:28:10.499037 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-dfhlh" event={"ID":"4173396e-acf1-469e-9c63-4a02a2a1692b","Type":"ContainerDied","Data":"bec9a5a63c2d24eff6a5181fa8d4b2bd83d6de38ee6453578c0aedfb3b3adb2a"} Nov 28 11:28:10 crc kubenswrapper[4923]: I1128 11:28:10.499217 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7b946d459c-b9ggp" Nov 28 11:28:10 crc kubenswrapper[4923]: I1128 11:28:10.656036 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-659684c4b8-cg62f"] Nov 28 11:28:10 crc kubenswrapper[4923]: E1128 11:28:10.656378 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6803cf35-bd54-4b83-a5b1-42cae252f98d" containerName="placement-db-sync" Nov 28 11:28:10 crc kubenswrapper[4923]: I1128 11:28:10.656389 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="6803cf35-bd54-4b83-a5b1-42cae252f98d" containerName="placement-db-sync" Nov 28 11:28:10 crc kubenswrapper[4923]: I1128 11:28:10.656560 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="6803cf35-bd54-4b83-a5b1-42cae252f98d" containerName="placement-db-sync" Nov 28 11:28:10 crc kubenswrapper[4923]: I1128 11:28:10.657350 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-659684c4b8-cg62f" Nov 28 11:28:10 crc kubenswrapper[4923]: I1128 11:28:10.660296 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Nov 28 11:28:10 crc kubenswrapper[4923]: I1128 11:28:10.660512 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-q25nv" Nov 28 11:28:10 crc kubenswrapper[4923]: I1128 11:28:10.660692 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Nov 28 11:28:10 crc kubenswrapper[4923]: I1128 11:28:10.660809 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc" Nov 28 11:28:10 crc kubenswrapper[4923]: I1128 11:28:10.660991 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc" Nov 28 11:28:10 crc kubenswrapper[4923]: I1128 11:28:10.666609 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-659684c4b8-cg62f"] Nov 28 11:28:10 crc kubenswrapper[4923]: I1128 11:28:10.745841 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/36d8d0f6-8ae1-43d3-8a95-9c703dfe37e2-config-data\") pod \"placement-659684c4b8-cg62f\" (UID: \"36d8d0f6-8ae1-43d3-8a95-9c703dfe37e2\") " pod="openstack/placement-659684c4b8-cg62f" Nov 28 11:28:10 crc kubenswrapper[4923]: I1128 11:28:10.745915 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/36d8d0f6-8ae1-43d3-8a95-9c703dfe37e2-logs\") pod \"placement-659684c4b8-cg62f\" (UID: \"36d8d0f6-8ae1-43d3-8a95-9c703dfe37e2\") " pod="openstack/placement-659684c4b8-cg62f" Nov 28 11:28:10 crc kubenswrapper[4923]: I1128 11:28:10.745946 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/36d8d0f6-8ae1-43d3-8a95-9c703dfe37e2-combined-ca-bundle\") pod \"placement-659684c4b8-cg62f\" (UID: \"36d8d0f6-8ae1-43d3-8a95-9c703dfe37e2\") " pod="openstack/placement-659684c4b8-cg62f" Nov 28 11:28:10 crc kubenswrapper[4923]: I1128 11:28:10.746679 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/36d8d0f6-8ae1-43d3-8a95-9c703dfe37e2-scripts\") pod \"placement-659684c4b8-cg62f\" (UID: \"36d8d0f6-8ae1-43d3-8a95-9c703dfe37e2\") " pod="openstack/placement-659684c4b8-cg62f" Nov 28 11:28:10 crc kubenswrapper[4923]: I1128 11:28:10.746700 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/36d8d0f6-8ae1-43d3-8a95-9c703dfe37e2-internal-tls-certs\") pod \"placement-659684c4b8-cg62f\" (UID: \"36d8d0f6-8ae1-43d3-8a95-9c703dfe37e2\") " pod="openstack/placement-659684c4b8-cg62f" Nov 28 11:28:10 crc kubenswrapper[4923]: I1128 11:28:10.746732 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/36d8d0f6-8ae1-43d3-8a95-9c703dfe37e2-public-tls-certs\") pod \"placement-659684c4b8-cg62f\" (UID: \"36d8d0f6-8ae1-43d3-8a95-9c703dfe37e2\") " pod="openstack/placement-659684c4b8-cg62f" Nov 28 11:28:10 crc kubenswrapper[4923]: I1128 11:28:10.746763 4923 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-24lwt\" (UniqueName: \"kubernetes.io/projected/36d8d0f6-8ae1-43d3-8a95-9c703dfe37e2-kube-api-access-24lwt\") pod \"placement-659684c4b8-cg62f\" (UID: \"36d8d0f6-8ae1-43d3-8a95-9c703dfe37e2\") " pod="openstack/placement-659684c4b8-cg62f" Nov 28 11:28:10 crc kubenswrapper[4923]: I1128 11:28:10.854512 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/36d8d0f6-8ae1-43d3-8a95-9c703dfe37e2-config-data\") pod \"placement-659684c4b8-cg62f\" (UID: \"36d8d0f6-8ae1-43d3-8a95-9c703dfe37e2\") " pod="openstack/placement-659684c4b8-cg62f" Nov 28 11:28:10 crc kubenswrapper[4923]: I1128 11:28:10.854591 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/36d8d0f6-8ae1-43d3-8a95-9c703dfe37e2-logs\") pod \"placement-659684c4b8-cg62f\" (UID: \"36d8d0f6-8ae1-43d3-8a95-9c703dfe37e2\") " pod="openstack/placement-659684c4b8-cg62f" Nov 28 11:28:10 crc kubenswrapper[4923]: I1128 11:28:10.854610 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/36d8d0f6-8ae1-43d3-8a95-9c703dfe37e2-combined-ca-bundle\") pod \"placement-659684c4b8-cg62f\" (UID: \"36d8d0f6-8ae1-43d3-8a95-9c703dfe37e2\") " pod="openstack/placement-659684c4b8-cg62f" Nov 28 11:28:10 crc kubenswrapper[4923]: I1128 11:28:10.854719 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/36d8d0f6-8ae1-43d3-8a95-9c703dfe37e2-scripts\") pod \"placement-659684c4b8-cg62f\" (UID: \"36d8d0f6-8ae1-43d3-8a95-9c703dfe37e2\") " pod="openstack/placement-659684c4b8-cg62f" Nov 28 11:28:10 crc kubenswrapper[4923]: I1128 11:28:10.854746 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/36d8d0f6-8ae1-43d3-8a95-9c703dfe37e2-internal-tls-certs\") pod \"placement-659684c4b8-cg62f\" (UID: \"36d8d0f6-8ae1-43d3-8a95-9c703dfe37e2\") " pod="openstack/placement-659684c4b8-cg62f" Nov 28 11:28:10 crc kubenswrapper[4923]: I1128 11:28:10.854775 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/36d8d0f6-8ae1-43d3-8a95-9c703dfe37e2-public-tls-certs\") pod \"placement-659684c4b8-cg62f\" (UID: \"36d8d0f6-8ae1-43d3-8a95-9c703dfe37e2\") " pod="openstack/placement-659684c4b8-cg62f" Nov 28 11:28:10 crc kubenswrapper[4923]: I1128 11:28:10.854834 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-24lwt\" (UniqueName: \"kubernetes.io/projected/36d8d0f6-8ae1-43d3-8a95-9c703dfe37e2-kube-api-access-24lwt\") pod \"placement-659684c4b8-cg62f\" (UID: \"36d8d0f6-8ae1-43d3-8a95-9c703dfe37e2\") " pod="openstack/placement-659684c4b8-cg62f" Nov 28 11:28:10 crc kubenswrapper[4923]: I1128 11:28:10.864123 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/36d8d0f6-8ae1-43d3-8a95-9c703dfe37e2-scripts\") pod \"placement-659684c4b8-cg62f\" (UID: \"36d8d0f6-8ae1-43d3-8a95-9c703dfe37e2\") " pod="openstack/placement-659684c4b8-cg62f" Nov 28 11:28:10 crc kubenswrapper[4923]: I1128 11:28:10.868662 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/36d8d0f6-8ae1-43d3-8a95-9c703dfe37e2-config-data\") pod \"placement-659684c4b8-cg62f\" (UID: \"36d8d0f6-8ae1-43d3-8a95-9c703dfe37e2\") " pod="openstack/placement-659684c4b8-cg62f" Nov 28 11:28:10 crc kubenswrapper[4923]: I1128 11:28:10.869301 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/36d8d0f6-8ae1-43d3-8a95-9c703dfe37e2-internal-tls-certs\") pod \"placement-659684c4b8-cg62f\" (UID: \"36d8d0f6-8ae1-43d3-8a95-9c703dfe37e2\") " pod="openstack/placement-659684c4b8-cg62f" Nov 28 11:28:10 crc kubenswrapper[4923]: I1128 11:28:10.869571 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/36d8d0f6-8ae1-43d3-8a95-9c703dfe37e2-logs\") pod \"placement-659684c4b8-cg62f\" (UID: \"36d8d0f6-8ae1-43d3-8a95-9c703dfe37e2\") " pod="openstack/placement-659684c4b8-cg62f" Nov 28 11:28:10 crc kubenswrapper[4923]: I1128 11:28:10.876995 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/36d8d0f6-8ae1-43d3-8a95-9c703dfe37e2-public-tls-certs\") pod \"placement-659684c4b8-cg62f\" (UID: \"36d8d0f6-8ae1-43d3-8a95-9c703dfe37e2\") " pod="openstack/placement-659684c4b8-cg62f" Nov 28 11:28:10 crc kubenswrapper[4923]: I1128 11:28:10.878978 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-24lwt\" (UniqueName: \"kubernetes.io/projected/36d8d0f6-8ae1-43d3-8a95-9c703dfe37e2-kube-api-access-24lwt\") pod \"placement-659684c4b8-cg62f\" (UID: \"36d8d0f6-8ae1-43d3-8a95-9c703dfe37e2\") " pod="openstack/placement-659684c4b8-cg62f" Nov 28 11:28:10 crc kubenswrapper[4923]: I1128 11:28:10.880535 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/36d8d0f6-8ae1-43d3-8a95-9c703dfe37e2-combined-ca-bundle\") pod \"placement-659684c4b8-cg62f\" (UID: \"36d8d0f6-8ae1-43d3-8a95-9c703dfe37e2\") " pod="openstack/placement-659684c4b8-cg62f" Nov 28 11:28:10 crc kubenswrapper[4923]: I1128 11:28:10.991601 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-659684c4b8-cg62f" Nov 28 11:28:11 crc kubenswrapper[4923]: I1128 11:28:11.510886 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5fbc45745-sfgcl" event={"ID":"71e4dc03-02ac-4443-ad2d-9d47f3f1457b","Type":"ContainerStarted","Data":"7b6d1afd8fb70335e13342df9ae4ebfd3f3b6cf279274c0b4d0d0e747464ff00"} Nov 28 11:28:14 crc kubenswrapper[4923]: I1128 11:28:14.027606 4923 patch_prober.go:28] interesting pod/machine-config-daemon-bwdth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 11:28:14 crc kubenswrapper[4923]: I1128 11:28:14.028053 4923 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 11:28:14 crc kubenswrapper[4923]: I1128 11:28:14.028121 4923 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" Nov 28 11:28:14 crc kubenswrapper[4923]: I1128 11:28:14.029152 4923 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"e854d096d0336c4d9ad4dac3da4cdf01df8dfe8d9a2f05530bd236f4a045e2f0"} pod="openshift-machine-config-operator/machine-config-daemon-bwdth" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 11:28:14 crc kubenswrapper[4923]: I1128 11:28:14.029253 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" containerName="machine-config-daemon" containerID="cri-o://e854d096d0336c4d9ad4dac3da4cdf01df8dfe8d9a2f05530bd236f4a045e2f0" gracePeriod=600 Nov 28 11:28:14 crc kubenswrapper[4923]: I1128 11:28:14.539877 4923 generic.go:334] "Generic (PLEG): container finished" podID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" containerID="e854d096d0336c4d9ad4dac3da4cdf01df8dfe8d9a2f05530bd236f4a045e2f0" exitCode=0 Nov 28 11:28:14 crc kubenswrapper[4923]: I1128 11:28:14.539925 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" event={"ID":"092566f7-fc7d-4897-a1f2-4ecedcd3058e","Type":"ContainerDied","Data":"e854d096d0336c4d9ad4dac3da4cdf01df8dfe8d9a2f05530bd236f4a045e2f0"} Nov 28 11:28:14 crc kubenswrapper[4923]: I1128 11:28:14.539994 4923 scope.go:117] "RemoveContainer" containerID="1125a66670947f90cf2e295b500044f466e54c6f2bb9f5eb7e6841beb4d77d04" Nov 28 11:28:16 crc kubenswrapper[4923]: I1128 11:28:16.342170 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-7b946d459c-b9ggp" Nov 28 11:28:16 crc kubenswrapper[4923]: I1128 11:28:16.446656 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7987f74bbc-9zsf6"] Nov 28 11:28:16 crc kubenswrapper[4923]: I1128 11:28:16.446890 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7987f74bbc-9zsf6" podUID="b1a828ce-0622-4f74-937b-4341f9795501" containerName="dnsmasq-dns" 
containerID="cri-o://29e3641c4f6b0a9e94db8d56755d610f4bc6a2cf92e3ad1ee60195b37b1e3e19" gracePeriod=10 Nov 28 11:28:17 crc kubenswrapper[4923]: I1128 11:28:17.127973 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-dfhlh" Nov 28 11:28:17 crc kubenswrapper[4923]: I1128 11:28:17.191968 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4173396e-acf1-469e-9c63-4a02a2a1692b-config-data\") pod \"4173396e-acf1-469e-9c63-4a02a2a1692b\" (UID: \"4173396e-acf1-469e-9c63-4a02a2a1692b\") " Nov 28 11:28:17 crc kubenswrapper[4923]: I1128 11:28:17.192026 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/4173396e-acf1-469e-9c63-4a02a2a1692b-credential-keys\") pod \"4173396e-acf1-469e-9c63-4a02a2a1692b\" (UID: \"4173396e-acf1-469e-9c63-4a02a2a1692b\") " Nov 28 11:28:17 crc kubenswrapper[4923]: I1128 11:28:17.192156 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4173396e-acf1-469e-9c63-4a02a2a1692b-combined-ca-bundle\") pod \"4173396e-acf1-469e-9c63-4a02a2a1692b\" (UID: \"4173396e-acf1-469e-9c63-4a02a2a1692b\") " Nov 28 11:28:17 crc kubenswrapper[4923]: I1128 11:28:17.192198 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4173396e-acf1-469e-9c63-4a02a2a1692b-scripts\") pod \"4173396e-acf1-469e-9c63-4a02a2a1692b\" (UID: \"4173396e-acf1-469e-9c63-4a02a2a1692b\") " Nov 28 11:28:17 crc kubenswrapper[4923]: I1128 11:28:17.192275 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7jlfn\" (UniqueName: \"kubernetes.io/projected/4173396e-acf1-469e-9c63-4a02a2a1692b-kube-api-access-7jlfn\") pod \"4173396e-acf1-469e-9c63-4a02a2a1692b\" (UID: \"4173396e-acf1-469e-9c63-4a02a2a1692b\") " Nov 28 11:28:17 crc kubenswrapper[4923]: I1128 11:28:17.192317 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/4173396e-acf1-469e-9c63-4a02a2a1692b-fernet-keys\") pod \"4173396e-acf1-469e-9c63-4a02a2a1692b\" (UID: \"4173396e-acf1-469e-9c63-4a02a2a1692b\") " Nov 28 11:28:17 crc kubenswrapper[4923]: I1128 11:28:17.203476 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4173396e-acf1-469e-9c63-4a02a2a1692b-scripts" (OuterVolumeSpecName: "scripts") pod "4173396e-acf1-469e-9c63-4a02a2a1692b" (UID: "4173396e-acf1-469e-9c63-4a02a2a1692b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:28:17 crc kubenswrapper[4923]: I1128 11:28:17.212676 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4173396e-acf1-469e-9c63-4a02a2a1692b-kube-api-access-7jlfn" (OuterVolumeSpecName: "kube-api-access-7jlfn") pod "4173396e-acf1-469e-9c63-4a02a2a1692b" (UID: "4173396e-acf1-469e-9c63-4a02a2a1692b"). InnerVolumeSpecName "kube-api-access-7jlfn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:28:17 crc kubenswrapper[4923]: I1128 11:28:17.214711 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4173396e-acf1-469e-9c63-4a02a2a1692b-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "4173396e-acf1-469e-9c63-4a02a2a1692b" (UID: "4173396e-acf1-469e-9c63-4a02a2a1692b"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:28:17 crc kubenswrapper[4923]: I1128 11:28:17.216123 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4173396e-acf1-469e-9c63-4a02a2a1692b-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "4173396e-acf1-469e-9c63-4a02a2a1692b" (UID: "4173396e-acf1-469e-9c63-4a02a2a1692b"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:28:17 crc kubenswrapper[4923]: I1128 11:28:17.245796 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4173396e-acf1-469e-9c63-4a02a2a1692b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4173396e-acf1-469e-9c63-4a02a2a1692b" (UID: "4173396e-acf1-469e-9c63-4a02a2a1692b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:28:17 crc kubenswrapper[4923]: I1128 11:28:17.250206 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4173396e-acf1-469e-9c63-4a02a2a1692b-config-data" (OuterVolumeSpecName: "config-data") pod "4173396e-acf1-469e-9c63-4a02a2a1692b" (UID: "4173396e-acf1-469e-9c63-4a02a2a1692b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:28:17 crc kubenswrapper[4923]: I1128 11:28:17.286180 4923 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-7987f74bbc-9zsf6" podUID="b1a828ce-0622-4f74-937b-4341f9795501" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.134:5353: connect: connection refused" Nov 28 11:28:17 crc kubenswrapper[4923]: I1128 11:28:17.296028 4923 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4173396e-acf1-469e-9c63-4a02a2a1692b-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 11:28:17 crc kubenswrapper[4923]: I1128 11:28:17.296229 4923 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/4173396e-acf1-469e-9c63-4a02a2a1692b-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 28 11:28:17 crc kubenswrapper[4923]: I1128 11:28:17.296240 4923 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4173396e-acf1-469e-9c63-4a02a2a1692b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 11:28:17 crc kubenswrapper[4923]: I1128 11:28:17.296249 4923 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4173396e-acf1-469e-9c63-4a02a2a1692b-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 11:28:17 crc kubenswrapper[4923]: I1128 11:28:17.296257 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7jlfn\" (UniqueName: \"kubernetes.io/projected/4173396e-acf1-469e-9c63-4a02a2a1692b-kube-api-access-7jlfn\") on node \"crc\" DevicePath \"\"" Nov 28 11:28:17 crc kubenswrapper[4923]: I1128 11:28:17.296265 4923 reconciler_common.go:293] 
"Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/4173396e-acf1-469e-9c63-4a02a2a1692b-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 28 11:28:17 crc kubenswrapper[4923]: I1128 11:28:17.566698 4923 generic.go:334] "Generic (PLEG): container finished" podID="b1a828ce-0622-4f74-937b-4341f9795501" containerID="29e3641c4f6b0a9e94db8d56755d610f4bc6a2cf92e3ad1ee60195b37b1e3e19" exitCode=0 Nov 28 11:28:17 crc kubenswrapper[4923]: I1128 11:28:17.566751 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7987f74bbc-9zsf6" event={"ID":"b1a828ce-0622-4f74-937b-4341f9795501","Type":"ContainerDied","Data":"29e3641c4f6b0a9e94db8d56755d610f4bc6a2cf92e3ad1ee60195b37b1e3e19"} Nov 28 11:28:17 crc kubenswrapper[4923]: I1128 11:28:17.568190 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-dfhlh" event={"ID":"4173396e-acf1-469e-9c63-4a02a2a1692b","Type":"ContainerDied","Data":"c996374e0606ae2190204f1ddd18817dafd652109aa23b034129f0511ea6bb2f"} Nov 28 11:28:17 crc kubenswrapper[4923]: I1128 11:28:17.568211 4923 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c996374e0606ae2190204f1ddd18817dafd652109aa23b034129f0511ea6bb2f" Nov 28 11:28:17 crc kubenswrapper[4923]: I1128 11:28:17.568256 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-dfhlh" Nov 28 11:28:17 crc kubenswrapper[4923]: I1128 11:28:17.618687 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-659684c4b8-cg62f"] Nov 28 11:28:18 crc kubenswrapper[4923]: I1128 11:28:18.363654 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-698b96f5d-97vsv"] Nov 28 11:28:18 crc kubenswrapper[4923]: E1128 11:28:18.364469 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4173396e-acf1-469e-9c63-4a02a2a1692b" containerName="keystone-bootstrap" Nov 28 11:28:18 crc kubenswrapper[4923]: I1128 11:28:18.364485 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="4173396e-acf1-469e-9c63-4a02a2a1692b" containerName="keystone-bootstrap" Nov 28 11:28:18 crc kubenswrapper[4923]: I1128 11:28:18.364648 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="4173396e-acf1-469e-9c63-4a02a2a1692b" containerName="keystone-bootstrap" Nov 28 11:28:18 crc kubenswrapper[4923]: I1128 11:28:18.365222 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-698b96f5d-97vsv" Nov 28 11:28:18 crc kubenswrapper[4923]: I1128 11:28:18.368693 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 28 11:28:18 crc kubenswrapper[4923]: I1128 11:28:18.369078 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 28 11:28:18 crc kubenswrapper[4923]: I1128 11:28:18.369204 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 28 11:28:18 crc kubenswrapper[4923]: I1128 11:28:18.369363 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc" Nov 28 11:28:18 crc kubenswrapper[4923]: I1128 11:28:18.371820 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-4f87p" Nov 28 11:28:18 crc kubenswrapper[4923]: I1128 11:28:18.371992 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc" Nov 28 11:28:18 crc kubenswrapper[4923]: I1128 11:28:18.375599 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-698b96f5d-97vsv"] Nov 28 11:28:18 crc kubenswrapper[4923]: I1128 11:28:18.411573 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/802efe15-184f-4d4b-821f-1e55c9fe5ace-public-tls-certs\") pod \"keystone-698b96f5d-97vsv\" (UID: \"802efe15-184f-4d4b-821f-1e55c9fe5ace\") " pod="openstack/keystone-698b96f5d-97vsv" Nov 28 11:28:18 crc kubenswrapper[4923]: I1128 11:28:18.411626 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-thtkq\" (UniqueName: \"kubernetes.io/projected/802efe15-184f-4d4b-821f-1e55c9fe5ace-kube-api-access-thtkq\") pod \"keystone-698b96f5d-97vsv\" (UID: \"802efe15-184f-4d4b-821f-1e55c9fe5ace\") " pod="openstack/keystone-698b96f5d-97vsv" Nov 28 11:28:18 crc kubenswrapper[4923]: I1128 11:28:18.411654 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/802efe15-184f-4d4b-821f-1e55c9fe5ace-config-data\") pod \"keystone-698b96f5d-97vsv\" (UID: \"802efe15-184f-4d4b-821f-1e55c9fe5ace\") " pod="openstack/keystone-698b96f5d-97vsv" Nov 28 11:28:18 crc kubenswrapper[4923]: I1128 11:28:18.411685 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/802efe15-184f-4d4b-821f-1e55c9fe5ace-internal-tls-certs\") pod \"keystone-698b96f5d-97vsv\" (UID: \"802efe15-184f-4d4b-821f-1e55c9fe5ace\") " pod="openstack/keystone-698b96f5d-97vsv" Nov 28 11:28:18 crc kubenswrapper[4923]: I1128 11:28:18.411730 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/802efe15-184f-4d4b-821f-1e55c9fe5ace-credential-keys\") pod \"keystone-698b96f5d-97vsv\" (UID: \"802efe15-184f-4d4b-821f-1e55c9fe5ace\") " pod="openstack/keystone-698b96f5d-97vsv" Nov 28 11:28:18 crc kubenswrapper[4923]: I1128 11:28:18.411764 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/802efe15-184f-4d4b-821f-1e55c9fe5ace-scripts\") pod \"keystone-698b96f5d-97vsv\" (UID: 
\"802efe15-184f-4d4b-821f-1e55c9fe5ace\") " pod="openstack/keystone-698b96f5d-97vsv" Nov 28 11:28:18 crc kubenswrapper[4923]: I1128 11:28:18.411783 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/802efe15-184f-4d4b-821f-1e55c9fe5ace-combined-ca-bundle\") pod \"keystone-698b96f5d-97vsv\" (UID: \"802efe15-184f-4d4b-821f-1e55c9fe5ace\") " pod="openstack/keystone-698b96f5d-97vsv" Nov 28 11:28:18 crc kubenswrapper[4923]: I1128 11:28:18.411801 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/802efe15-184f-4d4b-821f-1e55c9fe5ace-fernet-keys\") pod \"keystone-698b96f5d-97vsv\" (UID: \"802efe15-184f-4d4b-821f-1e55c9fe5ace\") " pod="openstack/keystone-698b96f5d-97vsv" Nov 28 11:28:18 crc kubenswrapper[4923]: I1128 11:28:18.529027 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/802efe15-184f-4d4b-821f-1e55c9fe5ace-internal-tls-certs\") pod \"keystone-698b96f5d-97vsv\" (UID: \"802efe15-184f-4d4b-821f-1e55c9fe5ace\") " pod="openstack/keystone-698b96f5d-97vsv" Nov 28 11:28:18 crc kubenswrapper[4923]: I1128 11:28:18.529161 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/802efe15-184f-4d4b-821f-1e55c9fe5ace-credential-keys\") pod \"keystone-698b96f5d-97vsv\" (UID: \"802efe15-184f-4d4b-821f-1e55c9fe5ace\") " pod="openstack/keystone-698b96f5d-97vsv" Nov 28 11:28:18 crc kubenswrapper[4923]: I1128 11:28:18.529236 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/802efe15-184f-4d4b-821f-1e55c9fe5ace-scripts\") pod \"keystone-698b96f5d-97vsv\" (UID: \"802efe15-184f-4d4b-821f-1e55c9fe5ace\") " pod="openstack/keystone-698b96f5d-97vsv" Nov 28 11:28:18 crc kubenswrapper[4923]: I1128 11:28:18.529255 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/802efe15-184f-4d4b-821f-1e55c9fe5ace-combined-ca-bundle\") pod \"keystone-698b96f5d-97vsv\" (UID: \"802efe15-184f-4d4b-821f-1e55c9fe5ace\") " pod="openstack/keystone-698b96f5d-97vsv" Nov 28 11:28:18 crc kubenswrapper[4923]: I1128 11:28:18.529294 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/802efe15-184f-4d4b-821f-1e55c9fe5ace-fernet-keys\") pod \"keystone-698b96f5d-97vsv\" (UID: \"802efe15-184f-4d4b-821f-1e55c9fe5ace\") " pod="openstack/keystone-698b96f5d-97vsv" Nov 28 11:28:18 crc kubenswrapper[4923]: I1128 11:28:18.529376 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/802efe15-184f-4d4b-821f-1e55c9fe5ace-public-tls-certs\") pod \"keystone-698b96f5d-97vsv\" (UID: \"802efe15-184f-4d4b-821f-1e55c9fe5ace\") " pod="openstack/keystone-698b96f5d-97vsv" Nov 28 11:28:18 crc kubenswrapper[4923]: I1128 11:28:18.529420 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-thtkq\" (UniqueName: \"kubernetes.io/projected/802efe15-184f-4d4b-821f-1e55c9fe5ace-kube-api-access-thtkq\") pod \"keystone-698b96f5d-97vsv\" (UID: \"802efe15-184f-4d4b-821f-1e55c9fe5ace\") " pod="openstack/keystone-698b96f5d-97vsv" Nov 28 
11:28:18 crc kubenswrapper[4923]: I1128 11:28:18.529453 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/802efe15-184f-4d4b-821f-1e55c9fe5ace-config-data\") pod \"keystone-698b96f5d-97vsv\" (UID: \"802efe15-184f-4d4b-821f-1e55c9fe5ace\") " pod="openstack/keystone-698b96f5d-97vsv" Nov 28 11:28:18 crc kubenswrapper[4923]: I1128 11:28:18.538054 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/802efe15-184f-4d4b-821f-1e55c9fe5ace-internal-tls-certs\") pod \"keystone-698b96f5d-97vsv\" (UID: \"802efe15-184f-4d4b-821f-1e55c9fe5ace\") " pod="openstack/keystone-698b96f5d-97vsv" Nov 28 11:28:18 crc kubenswrapper[4923]: I1128 11:28:18.538337 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/802efe15-184f-4d4b-821f-1e55c9fe5ace-scripts\") pod \"keystone-698b96f5d-97vsv\" (UID: \"802efe15-184f-4d4b-821f-1e55c9fe5ace\") " pod="openstack/keystone-698b96f5d-97vsv" Nov 28 11:28:18 crc kubenswrapper[4923]: I1128 11:28:18.538080 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/802efe15-184f-4d4b-821f-1e55c9fe5ace-fernet-keys\") pod \"keystone-698b96f5d-97vsv\" (UID: \"802efe15-184f-4d4b-821f-1e55c9fe5ace\") " pod="openstack/keystone-698b96f5d-97vsv" Nov 28 11:28:18 crc kubenswrapper[4923]: I1128 11:28:18.538634 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/802efe15-184f-4d4b-821f-1e55c9fe5ace-combined-ca-bundle\") pod \"keystone-698b96f5d-97vsv\" (UID: \"802efe15-184f-4d4b-821f-1e55c9fe5ace\") " pod="openstack/keystone-698b96f5d-97vsv" Nov 28 11:28:18 crc kubenswrapper[4923]: I1128 11:28:18.538719 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/802efe15-184f-4d4b-821f-1e55c9fe5ace-public-tls-certs\") pod \"keystone-698b96f5d-97vsv\" (UID: \"802efe15-184f-4d4b-821f-1e55c9fe5ace\") " pod="openstack/keystone-698b96f5d-97vsv" Nov 28 11:28:18 crc kubenswrapper[4923]: I1128 11:28:18.543726 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/802efe15-184f-4d4b-821f-1e55c9fe5ace-credential-keys\") pod \"keystone-698b96f5d-97vsv\" (UID: \"802efe15-184f-4d4b-821f-1e55c9fe5ace\") " pod="openstack/keystone-698b96f5d-97vsv" Nov 28 11:28:18 crc kubenswrapper[4923]: I1128 11:28:18.545605 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/802efe15-184f-4d4b-821f-1e55c9fe5ace-config-data\") pod \"keystone-698b96f5d-97vsv\" (UID: \"802efe15-184f-4d4b-821f-1e55c9fe5ace\") " pod="openstack/keystone-698b96f5d-97vsv" Nov 28 11:28:18 crc kubenswrapper[4923]: I1128 11:28:18.550127 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-thtkq\" (UniqueName: \"kubernetes.io/projected/802efe15-184f-4d4b-821f-1e55c9fe5ace-kube-api-access-thtkq\") pod \"keystone-698b96f5d-97vsv\" (UID: \"802efe15-184f-4d4b-821f-1e55c9fe5ace\") " pod="openstack/keystone-698b96f5d-97vsv" Nov 28 11:28:18 crc kubenswrapper[4923]: I1128 11:28:18.683252 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-698b96f5d-97vsv" Nov 28 11:28:19 crc kubenswrapper[4923]: I1128 11:28:19.329688 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7987f74bbc-9zsf6" Nov 28 11:28:19 crc kubenswrapper[4923]: I1128 11:28:19.444260 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b1a828ce-0622-4f74-937b-4341f9795501-config\") pod \"b1a828ce-0622-4f74-937b-4341f9795501\" (UID: \"b1a828ce-0622-4f74-937b-4341f9795501\") " Nov 28 11:28:19 crc kubenswrapper[4923]: I1128 11:28:19.444289 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b1a828ce-0622-4f74-937b-4341f9795501-ovsdbserver-sb\") pod \"b1a828ce-0622-4f74-937b-4341f9795501\" (UID: \"b1a828ce-0622-4f74-937b-4341f9795501\") " Nov 28 11:28:19 crc kubenswrapper[4923]: I1128 11:28:19.444359 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b1a828ce-0622-4f74-937b-4341f9795501-dns-svc\") pod \"b1a828ce-0622-4f74-937b-4341f9795501\" (UID: \"b1a828ce-0622-4f74-937b-4341f9795501\") " Nov 28 11:28:19 crc kubenswrapper[4923]: I1128 11:28:19.444434 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s2n5s\" (UniqueName: \"kubernetes.io/projected/b1a828ce-0622-4f74-937b-4341f9795501-kube-api-access-s2n5s\") pod \"b1a828ce-0622-4f74-937b-4341f9795501\" (UID: \"b1a828ce-0622-4f74-937b-4341f9795501\") " Nov 28 11:28:19 crc kubenswrapper[4923]: I1128 11:28:19.444590 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b1a828ce-0622-4f74-937b-4341f9795501-ovsdbserver-nb\") pod \"b1a828ce-0622-4f74-937b-4341f9795501\" (UID: \"b1a828ce-0622-4f74-937b-4341f9795501\") " Nov 28 11:28:19 crc kubenswrapper[4923]: I1128 11:28:19.465288 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b1a828ce-0622-4f74-937b-4341f9795501-kube-api-access-s2n5s" (OuterVolumeSpecName: "kube-api-access-s2n5s") pod "b1a828ce-0622-4f74-937b-4341f9795501" (UID: "b1a828ce-0622-4f74-937b-4341f9795501"). InnerVolumeSpecName "kube-api-access-s2n5s". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:28:19 crc kubenswrapper[4923]: I1128 11:28:19.545858 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s2n5s\" (UniqueName: \"kubernetes.io/projected/b1a828ce-0622-4f74-937b-4341f9795501-kube-api-access-s2n5s\") on node \"crc\" DevicePath \"\"" Nov 28 11:28:19 crc kubenswrapper[4923]: I1128 11:28:19.582273 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b1a828ce-0622-4f74-937b-4341f9795501-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "b1a828ce-0622-4f74-937b-4341f9795501" (UID: "b1a828ce-0622-4f74-937b-4341f9795501"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:28:19 crc kubenswrapper[4923]: I1128 11:28:19.583281 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b1a828ce-0622-4f74-937b-4341f9795501-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "b1a828ce-0622-4f74-937b-4341f9795501" (UID: "b1a828ce-0622-4f74-937b-4341f9795501"). 
InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:28:19 crc kubenswrapper[4923]: I1128 11:28:19.601877 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b1a828ce-0622-4f74-937b-4341f9795501-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "b1a828ce-0622-4f74-937b-4341f9795501" (UID: "b1a828ce-0622-4f74-937b-4341f9795501"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:28:19 crc kubenswrapper[4923]: I1128 11:28:19.606130 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7987f74bbc-9zsf6" event={"ID":"b1a828ce-0622-4f74-937b-4341f9795501","Type":"ContainerDied","Data":"8d6b9f803496115d1a0e0bcbc0f032afca6d7d45f1ce8d53fc435628e61b81e5"} Nov 28 11:28:19 crc kubenswrapper[4923]: I1128 11:28:19.606175 4923 scope.go:117] "RemoveContainer" containerID="29e3641c4f6b0a9e94db8d56755d610f4bc6a2cf92e3ad1ee60195b37b1e3e19" Nov 28 11:28:19 crc kubenswrapper[4923]: I1128 11:28:19.606274 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7987f74bbc-9zsf6" Nov 28 11:28:19 crc kubenswrapper[4923]: I1128 11:28:19.612170 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5fbc45745-sfgcl" event={"ID":"71e4dc03-02ac-4443-ad2d-9d47f3f1457b","Type":"ContainerStarted","Data":"7a383fa53c0f4dae9b810037b12eb06e870a76610c65ba2a377d6f472bafeb75"} Nov 28 11:28:19 crc kubenswrapper[4923]: I1128 11:28:19.613001 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-5fbc45745-sfgcl" Nov 28 11:28:19 crc kubenswrapper[4923]: I1128 11:28:19.618738 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-659684c4b8-cg62f" event={"ID":"36d8d0f6-8ae1-43d3-8a95-9c703dfe37e2","Type":"ContainerStarted","Data":"84c1f8551b46c8ed8e34aa1621d982b7ec253534d3929a77b3eef49cc9f8d585"} Nov 28 11:28:19 crc kubenswrapper[4923]: I1128 11:28:19.622559 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b1a828ce-0622-4f74-937b-4341f9795501-config" (OuterVolumeSpecName: "config") pod "b1a828ce-0622-4f74-937b-4341f9795501" (UID: "b1a828ce-0622-4f74-937b-4341f9795501"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:28:19 crc kubenswrapper[4923]: I1128 11:28:19.624830 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" event={"ID":"092566f7-fc7d-4897-a1f2-4ecedcd3058e","Type":"ContainerStarted","Data":"d1a2e1beb233079a250c29730400b1c9cdbf26210af36136b746e09631ce81a5"} Nov 28 11:28:19 crc kubenswrapper[4923]: I1128 11:28:19.646913 4923 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b1a828ce-0622-4f74-937b-4341f9795501-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 11:28:19 crc kubenswrapper[4923]: I1128 11:28:19.647012 4923 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b1a828ce-0622-4f74-937b-4341f9795501-config\") on node \"crc\" DevicePath \"\"" Nov 28 11:28:19 crc kubenswrapper[4923]: I1128 11:28:19.647024 4923 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b1a828ce-0622-4f74-937b-4341f9795501-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 11:28:19 crc kubenswrapper[4923]: I1128 11:28:19.647033 4923 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b1a828ce-0622-4f74-937b-4341f9795501-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 11:28:19 crc kubenswrapper[4923]: I1128 11:28:19.661082 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-5fbc45745-sfgcl" podStartSLOduration=11.660906955 podStartE2EDuration="11.660906955s" podCreationTimestamp="2025-11-28 11:28:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:28:19.640314609 +0000 UTC m=+1178.768998819" watchObservedRunningTime="2025-11-28 11:28:19.660906955 +0000 UTC m=+1178.789591165" Nov 28 11:28:19 crc kubenswrapper[4923]: I1128 11:28:19.690160 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-698b96f5d-97vsv"] Nov 28 11:28:19 crc kubenswrapper[4923]: I1128 11:28:19.980944 4923 scope.go:117] "RemoveContainer" containerID="e98a43c2efd680b1f3408870f7b66c3d65b8514cb7499f2b890569825c2c10da" Nov 28 11:28:19 crc kubenswrapper[4923]: I1128 11:28:19.996147 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7987f74bbc-9zsf6"] Nov 28 11:28:20 crc kubenswrapper[4923]: I1128 11:28:20.008392 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7987f74bbc-9zsf6"] Nov 28 11:28:20 crc kubenswrapper[4923]: I1128 11:28:20.643648 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5b557d79-c22c-47a7-b460-d65c25c2bca8","Type":"ContainerStarted","Data":"3e2c47547dc2cf28a7e6a39d573c710dfad60bc6c0d9c326c56c5ca481a4f7ea"} Nov 28 11:28:20 crc kubenswrapper[4923]: I1128 11:28:20.650282 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-659684c4b8-cg62f" event={"ID":"36d8d0f6-8ae1-43d3-8a95-9c703dfe37e2","Type":"ContainerStarted","Data":"836391eedd07b4126d1a651af04333fb48532b025afb42d1405402e1c635211e"} Nov 28 11:28:20 crc kubenswrapper[4923]: I1128 11:28:20.650351 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-659684c4b8-cg62f" Nov 28 11:28:20 crc kubenswrapper[4923]: I1128 11:28:20.650364 4923 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openstack/placement-659684c4b8-cg62f" event={"ID":"36d8d0f6-8ae1-43d3-8a95-9c703dfe37e2","Type":"ContainerStarted","Data":"9bebadf67a93100cf69ac9e22f3178bb56af48b953f65f6ab381909826339e66"} Nov 28 11:28:20 crc kubenswrapper[4923]: I1128 11:28:20.650384 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-659684c4b8-cg62f" Nov 28 11:28:20 crc kubenswrapper[4923]: I1128 11:28:20.654810 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-698b96f5d-97vsv" event={"ID":"802efe15-184f-4d4b-821f-1e55c9fe5ace","Type":"ContainerStarted","Data":"a77de8233b03172409d9a409ed978ddc91f15193e9506b4ae835c486aee4cc02"} Nov 28 11:28:20 crc kubenswrapper[4923]: I1128 11:28:20.654856 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-698b96f5d-97vsv" event={"ID":"802efe15-184f-4d4b-821f-1e55c9fe5ace","Type":"ContainerStarted","Data":"ef5c923eee358f686228c4d4ea25581745767898f8a8632a351943e060e13c9f"} Nov 28 11:28:20 crc kubenswrapper[4923]: I1128 11:28:20.654964 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-698b96f5d-97vsv" Nov 28 11:28:20 crc kubenswrapper[4923]: I1128 11:28:20.658516 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-b867g" event={"ID":"1813822f-07d2-4a68-98bf-26cf5edd6707","Type":"ContainerStarted","Data":"74b379aca93af9f2387202b8bda7b746d56fc3b1ac2a351fc25aa0037baf6098"} Nov 28 11:28:20 crc kubenswrapper[4923]: I1128 11:28:20.672898 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-659684c4b8-cg62f" podStartSLOduration=10.672884456 podStartE2EDuration="10.672884456s" podCreationTimestamp="2025-11-28 11:28:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:28:20.667747792 +0000 UTC m=+1179.796432002" watchObservedRunningTime="2025-11-28 11:28:20.672884456 +0000 UTC m=+1179.801568666" Nov 28 11:28:20 crc kubenswrapper[4923]: I1128 11:28:20.692846 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-698b96f5d-97vsv" podStartSLOduration=2.692825174 podStartE2EDuration="2.692825174s" podCreationTimestamp="2025-11-28 11:28:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:28:20.685363305 +0000 UTC m=+1179.814047515" watchObservedRunningTime="2025-11-28 11:28:20.692825174 +0000 UTC m=+1179.821509394" Nov 28 11:28:20 crc kubenswrapper[4923]: I1128 11:28:20.709761 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-b867g" podStartSLOduration=3.216480332 podStartE2EDuration="39.709742567s" podCreationTimestamp="2025-11-28 11:27:41 +0000 UTC" firstStartedPulling="2025-11-28 11:27:43.496880307 +0000 UTC m=+1142.625564507" lastFinishedPulling="2025-11-28 11:28:19.990142502 +0000 UTC m=+1179.118826742" observedRunningTime="2025-11-28 11:28:20.703335368 +0000 UTC m=+1179.832019578" watchObservedRunningTime="2025-11-28 11:28:20.709742567 +0000 UTC m=+1179.838426777" Nov 28 11:28:21 crc kubenswrapper[4923]: I1128 11:28:21.179855 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b1a828ce-0622-4f74-937b-4341f9795501" path="/var/lib/kubelet/pods/b1a828ce-0622-4f74-937b-4341f9795501/volumes" Nov 28 11:28:21 crc kubenswrapper[4923]: I1128 
11:28:21.670567 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-s6twc" event={"ID":"4c7bc447-b1f7-4e68-b0da-310515aecea9","Type":"ContainerStarted","Data":"4e1dcbe13bba7a95afee9778c80901bf4dff2a4cf4c8bec19d0efaa3eb2d8d02"} Nov 28 11:28:21 crc kubenswrapper[4923]: I1128 11:28:21.692975 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-s6twc" podStartSLOduration=3.804521956 podStartE2EDuration="40.692957023s" podCreationTimestamp="2025-11-28 11:27:41 +0000 UTC" firstStartedPulling="2025-11-28 11:27:43.101678694 +0000 UTC m=+1142.230362904" lastFinishedPulling="2025-11-28 11:28:19.990113711 +0000 UTC m=+1179.118797971" observedRunningTime="2025-11-28 11:28:21.692432788 +0000 UTC m=+1180.821117018" watchObservedRunningTime="2025-11-28 11:28:21.692957023 +0000 UTC m=+1180.821641243" Nov 28 11:28:30 crc kubenswrapper[4923]: I1128 11:28:30.749147 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5b557d79-c22c-47a7-b460-d65c25c2bca8","Type":"ContainerStarted","Data":"212701849a5d6cd056cc743aec0309c38557296fdc86c14e28dcbd8bd58dab47"} Nov 28 11:28:30 crc kubenswrapper[4923]: I1128 11:28:30.749622 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 28 11:28:30 crc kubenswrapper[4923]: I1128 11:28:30.749396 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="5b557d79-c22c-47a7-b460-d65c25c2bca8" containerName="proxy-httpd" containerID="cri-o://212701849a5d6cd056cc743aec0309c38557296fdc86c14e28dcbd8bd58dab47" gracePeriod=30 Nov 28 11:28:30 crc kubenswrapper[4923]: I1128 11:28:30.749343 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="5b557d79-c22c-47a7-b460-d65c25c2bca8" containerName="ceilometer-central-agent" containerID="cri-o://9e974fec2fa94132da5509cd0d195f51c5708539f7a12beba354675cbebf9583" gracePeriod=30 Nov 28 11:28:30 crc kubenswrapper[4923]: I1128 11:28:30.749410 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="5b557d79-c22c-47a7-b460-d65c25c2bca8" containerName="sg-core" containerID="cri-o://3e2c47547dc2cf28a7e6a39d573c710dfad60bc6c0d9c326c56c5ca481a4f7ea" gracePeriod=30 Nov 28 11:28:30 crc kubenswrapper[4923]: I1128 11:28:30.749422 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="5b557d79-c22c-47a7-b460-d65c25c2bca8" containerName="ceilometer-notification-agent" containerID="cri-o://5dab8389a161c430125def1dacd522708b542950beb371ccc60924982d792314" gracePeriod=30 Nov 28 11:28:30 crc kubenswrapper[4923]: I1128 11:28:30.784089 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.913072761 podStartE2EDuration="49.784068386s" podCreationTimestamp="2025-11-28 11:27:41 +0000 UTC" firstStartedPulling="2025-11-28 11:27:43.198306231 +0000 UTC m=+1142.326990431" lastFinishedPulling="2025-11-28 11:28:30.069301846 +0000 UTC m=+1189.197986056" observedRunningTime="2025-11-28 11:28:30.778037577 +0000 UTC m=+1189.906721787" watchObservedRunningTime="2025-11-28 11:28:30.784068386 +0000 UTC m=+1189.912752586" Nov 28 11:28:31 crc kubenswrapper[4923]: I1128 11:28:31.757837 4923 generic.go:334] "Generic (PLEG): container finished" podID="5b557d79-c22c-47a7-b460-d65c25c2bca8" 
containerID="212701849a5d6cd056cc743aec0309c38557296fdc86c14e28dcbd8bd58dab47" exitCode=0 Nov 28 11:28:31 crc kubenswrapper[4923]: I1128 11:28:31.758085 4923 generic.go:334] "Generic (PLEG): container finished" podID="5b557d79-c22c-47a7-b460-d65c25c2bca8" containerID="3e2c47547dc2cf28a7e6a39d573c710dfad60bc6c0d9c326c56c5ca481a4f7ea" exitCode=2 Nov 28 11:28:31 crc kubenswrapper[4923]: I1128 11:28:31.758093 4923 generic.go:334] "Generic (PLEG): container finished" podID="5b557d79-c22c-47a7-b460-d65c25c2bca8" containerID="9e974fec2fa94132da5509cd0d195f51c5708539f7a12beba354675cbebf9583" exitCode=0 Nov 28 11:28:31 crc kubenswrapper[4923]: I1128 11:28:31.757893 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5b557d79-c22c-47a7-b460-d65c25c2bca8","Type":"ContainerDied","Data":"212701849a5d6cd056cc743aec0309c38557296fdc86c14e28dcbd8bd58dab47"} Nov 28 11:28:31 crc kubenswrapper[4923]: I1128 11:28:31.758129 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5b557d79-c22c-47a7-b460-d65c25c2bca8","Type":"ContainerDied","Data":"3e2c47547dc2cf28a7e6a39d573c710dfad60bc6c0d9c326c56c5ca481a4f7ea"} Nov 28 11:28:31 crc kubenswrapper[4923]: I1128 11:28:31.758143 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5b557d79-c22c-47a7-b460-d65c25c2bca8","Type":"ContainerDied","Data":"9e974fec2fa94132da5509cd0d195f51c5708539f7a12beba354675cbebf9583"} Nov 28 11:28:32 crc kubenswrapper[4923]: I1128 11:28:32.772781 4923 generic.go:334] "Generic (PLEG): container finished" podID="1813822f-07d2-4a68-98bf-26cf5edd6707" containerID="74b379aca93af9f2387202b8bda7b746d56fc3b1ac2a351fc25aa0037baf6098" exitCode=0 Nov 28 11:28:32 crc kubenswrapper[4923]: I1128 11:28:32.772922 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-b867g" event={"ID":"1813822f-07d2-4a68-98bf-26cf5edd6707","Type":"ContainerDied","Data":"74b379aca93af9f2387202b8bda7b746d56fc3b1ac2a351fc25aa0037baf6098"} Nov 28 11:28:33 crc kubenswrapper[4923]: I1128 11:28:33.789884 4923 generic.go:334] "Generic (PLEG): container finished" podID="4c7bc447-b1f7-4e68-b0da-310515aecea9" containerID="4e1dcbe13bba7a95afee9778c80901bf4dff2a4cf4c8bec19d0efaa3eb2d8d02" exitCode=0 Nov 28 11:28:33 crc kubenswrapper[4923]: I1128 11:28:33.790011 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-s6twc" event={"ID":"4c7bc447-b1f7-4e68-b0da-310515aecea9","Type":"ContainerDied","Data":"4e1dcbe13bba7a95afee9778c80901bf4dff2a4cf4c8bec19d0efaa3eb2d8d02"} Nov 28 11:28:34 crc kubenswrapper[4923]: I1128 11:28:34.252500 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-b867g" Nov 28 11:28:34 crc kubenswrapper[4923]: I1128 11:28:34.313297 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/1813822f-07d2-4a68-98bf-26cf5edd6707-db-sync-config-data\") pod \"1813822f-07d2-4a68-98bf-26cf5edd6707\" (UID: \"1813822f-07d2-4a68-98bf-26cf5edd6707\") " Nov 28 11:28:34 crc kubenswrapper[4923]: I1128 11:28:34.313375 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xr57z\" (UniqueName: \"kubernetes.io/projected/1813822f-07d2-4a68-98bf-26cf5edd6707-kube-api-access-xr57z\") pod \"1813822f-07d2-4a68-98bf-26cf5edd6707\" (UID: \"1813822f-07d2-4a68-98bf-26cf5edd6707\") " Nov 28 11:28:34 crc kubenswrapper[4923]: I1128 11:28:34.313441 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1813822f-07d2-4a68-98bf-26cf5edd6707-combined-ca-bundle\") pod \"1813822f-07d2-4a68-98bf-26cf5edd6707\" (UID: \"1813822f-07d2-4a68-98bf-26cf5edd6707\") " Nov 28 11:28:34 crc kubenswrapper[4923]: I1128 11:28:34.319827 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1813822f-07d2-4a68-98bf-26cf5edd6707-kube-api-access-xr57z" (OuterVolumeSpecName: "kube-api-access-xr57z") pod "1813822f-07d2-4a68-98bf-26cf5edd6707" (UID: "1813822f-07d2-4a68-98bf-26cf5edd6707"). InnerVolumeSpecName "kube-api-access-xr57z". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:28:34 crc kubenswrapper[4923]: I1128 11:28:34.323116 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1813822f-07d2-4a68-98bf-26cf5edd6707-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "1813822f-07d2-4a68-98bf-26cf5edd6707" (UID: "1813822f-07d2-4a68-98bf-26cf5edd6707"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:28:34 crc kubenswrapper[4923]: I1128 11:28:34.346069 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1813822f-07d2-4a68-98bf-26cf5edd6707-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1813822f-07d2-4a68-98bf-26cf5edd6707" (UID: "1813822f-07d2-4a68-98bf-26cf5edd6707"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:28:34 crc kubenswrapper[4923]: I1128 11:28:34.414671 4923 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1813822f-07d2-4a68-98bf-26cf5edd6707-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 11:28:34 crc kubenswrapper[4923]: I1128 11:28:34.414700 4923 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/1813822f-07d2-4a68-98bf-26cf5edd6707-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 11:28:34 crc kubenswrapper[4923]: I1128 11:28:34.414709 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xr57z\" (UniqueName: \"kubernetes.io/projected/1813822f-07d2-4a68-98bf-26cf5edd6707-kube-api-access-xr57z\") on node \"crc\" DevicePath \"\"" Nov 28 11:28:34 crc kubenswrapper[4923]: I1128 11:28:34.805597 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-b867g" Nov 28 11:28:34 crc kubenswrapper[4923]: I1128 11:28:34.805589 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-b867g" event={"ID":"1813822f-07d2-4a68-98bf-26cf5edd6707","Type":"ContainerDied","Data":"48704da40d1c67e86667ac5e14171583b066986ab9e16bd867eeb48a187b1be4"} Nov 28 11:28:34 crc kubenswrapper[4923]: I1128 11:28:34.805646 4923 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="48704da40d1c67e86667ac5e14171583b066986ab9e16bd867eeb48a187b1be4" Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.083650 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-58696cc594-f6zjf"] Nov 28 11:28:35 crc kubenswrapper[4923]: E1128 11:28:35.084274 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1813822f-07d2-4a68-98bf-26cf5edd6707" containerName="barbican-db-sync" Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.084290 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="1813822f-07d2-4a68-98bf-26cf5edd6707" containerName="barbican-db-sync" Nov 28 11:28:35 crc kubenswrapper[4923]: E1128 11:28:35.084303 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b1a828ce-0622-4f74-937b-4341f9795501" containerName="init" Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.084309 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="b1a828ce-0622-4f74-937b-4341f9795501" containerName="init" Nov 28 11:28:35 crc kubenswrapper[4923]: E1128 11:28:35.084333 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b1a828ce-0622-4f74-937b-4341f9795501" containerName="dnsmasq-dns" Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.084339 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="b1a828ce-0622-4f74-937b-4341f9795501" containerName="dnsmasq-dns" Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.084502 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="1813822f-07d2-4a68-98bf-26cf5edd6707" containerName="barbican-db-sync" Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.084536 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="b1a828ce-0622-4f74-937b-4341f9795501" containerName="dnsmasq-dns" Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.085395 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-58696cc594-f6zjf" Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.093323 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-bgjhb" Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.093631 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.093751 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.105875 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-fc7d79659-44chg"] Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.107326 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-worker-fc7d79659-44chg" Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.116096 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.129578 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7ffee751-8b25-4777-8623-ced7082ca426-logs\") pod \"barbican-keystone-listener-58696cc594-f6zjf\" (UID: \"7ffee751-8b25-4777-8623-ced7082ca426\") " pod="openstack/barbican-keystone-listener-58696cc594-f6zjf" Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.129609 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ffee751-8b25-4777-8623-ced7082ca426-combined-ca-bundle\") pod \"barbican-keystone-listener-58696cc594-f6zjf\" (UID: \"7ffee751-8b25-4777-8623-ced7082ca426\") " pod="openstack/barbican-keystone-listener-58696cc594-f6zjf" Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.129665 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a5b68211-f537-41d7-9b3d-859764f26575-logs\") pod \"barbican-worker-fc7d79659-44chg\" (UID: \"a5b68211-f537-41d7-9b3d-859764f26575\") " pod="openstack/barbican-worker-fc7d79659-44chg" Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.129684 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7ffee751-8b25-4777-8623-ced7082ca426-config-data\") pod \"barbican-keystone-listener-58696cc594-f6zjf\" (UID: \"7ffee751-8b25-4777-8623-ced7082ca426\") " pod="openstack/barbican-keystone-listener-58696cc594-f6zjf" Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.129703 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a5b68211-f537-41d7-9b3d-859764f26575-config-data\") pod \"barbican-worker-fc7d79659-44chg\" (UID: \"a5b68211-f537-41d7-9b3d-859764f26575\") " pod="openstack/barbican-worker-fc7d79659-44chg" Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.129722 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a5b68211-f537-41d7-9b3d-859764f26575-config-data-custom\") pod \"barbican-worker-fc7d79659-44chg\" (UID: \"a5b68211-f537-41d7-9b3d-859764f26575\") " pod="openstack/barbican-worker-fc7d79659-44chg" Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.129752 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/7ffee751-8b25-4777-8623-ced7082ca426-config-data-custom\") pod \"barbican-keystone-listener-58696cc594-f6zjf\" (UID: \"7ffee751-8b25-4777-8623-ced7082ca426\") " pod="openstack/barbican-keystone-listener-58696cc594-f6zjf" Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.129769 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2z59m\" (UniqueName: \"kubernetes.io/projected/a5b68211-f537-41d7-9b3d-859764f26575-kube-api-access-2z59m\") pod \"barbican-worker-fc7d79659-44chg\" (UID: 
\"a5b68211-f537-41d7-9b3d-859764f26575\") " pod="openstack/barbican-worker-fc7d79659-44chg" Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.129815 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5b68211-f537-41d7-9b3d-859764f26575-combined-ca-bundle\") pod \"barbican-worker-fc7d79659-44chg\" (UID: \"a5b68211-f537-41d7-9b3d-859764f26575\") " pod="openstack/barbican-worker-fc7d79659-44chg" Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.129833 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mkbr5\" (UniqueName: \"kubernetes.io/projected/7ffee751-8b25-4777-8623-ced7082ca426-kube-api-access-mkbr5\") pod \"barbican-keystone-listener-58696cc594-f6zjf\" (UID: \"7ffee751-8b25-4777-8623-ced7082ca426\") " pod="openstack/barbican-keystone-listener-58696cc594-f6zjf" Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.142857 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-58696cc594-f6zjf"] Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.158106 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-fc7d79659-44chg"] Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.192114 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6bb684768f-9nr26"] Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.193377 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6bb684768f-9nr26" Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.221987 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6bb684768f-9nr26"] Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.231259 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a5b68211-f537-41d7-9b3d-859764f26575-logs\") pod \"barbican-worker-fc7d79659-44chg\" (UID: \"a5b68211-f537-41d7-9b3d-859764f26575\") " pod="openstack/barbican-worker-fc7d79659-44chg" Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.231303 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7ffee751-8b25-4777-8623-ced7082ca426-config-data\") pod \"barbican-keystone-listener-58696cc594-f6zjf\" (UID: \"7ffee751-8b25-4777-8623-ced7082ca426\") " pod="openstack/barbican-keystone-listener-58696cc594-f6zjf" Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.231328 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a5b68211-f537-41d7-9b3d-859764f26575-config-data\") pod \"barbican-worker-fc7d79659-44chg\" (UID: \"a5b68211-f537-41d7-9b3d-859764f26575\") " pod="openstack/barbican-worker-fc7d79659-44chg" Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.231351 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a5b68211-f537-41d7-9b3d-859764f26575-config-data-custom\") pod \"barbican-worker-fc7d79659-44chg\" (UID: \"a5b68211-f537-41d7-9b3d-859764f26575\") " pod="openstack/barbican-worker-fc7d79659-44chg" Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.231379 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"config-data-custom\" (UniqueName: \"kubernetes.io/secret/7ffee751-8b25-4777-8623-ced7082ca426-config-data-custom\") pod \"barbican-keystone-listener-58696cc594-f6zjf\" (UID: \"7ffee751-8b25-4777-8623-ced7082ca426\") " pod="openstack/barbican-keystone-listener-58696cc594-f6zjf" Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.231399 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2z59m\" (UniqueName: \"kubernetes.io/projected/a5b68211-f537-41d7-9b3d-859764f26575-kube-api-access-2z59m\") pod \"barbican-worker-fc7d79659-44chg\" (UID: \"a5b68211-f537-41d7-9b3d-859764f26575\") " pod="openstack/barbican-worker-fc7d79659-44chg" Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.231438 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5b68211-f537-41d7-9b3d-859764f26575-combined-ca-bundle\") pod \"barbican-worker-fc7d79659-44chg\" (UID: \"a5b68211-f537-41d7-9b3d-859764f26575\") " pod="openstack/barbican-worker-fc7d79659-44chg" Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.231457 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mkbr5\" (UniqueName: \"kubernetes.io/projected/7ffee751-8b25-4777-8623-ced7082ca426-kube-api-access-mkbr5\") pod \"barbican-keystone-listener-58696cc594-f6zjf\" (UID: \"7ffee751-8b25-4777-8623-ced7082ca426\") " pod="openstack/barbican-keystone-listener-58696cc594-f6zjf" Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.232013 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a5b68211-f537-41d7-9b3d-859764f26575-logs\") pod \"barbican-worker-fc7d79659-44chg\" (UID: \"a5b68211-f537-41d7-9b3d-859764f26575\") " pod="openstack/barbican-worker-fc7d79659-44chg" Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.232696 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7ffee751-8b25-4777-8623-ced7082ca426-logs\") pod \"barbican-keystone-listener-58696cc594-f6zjf\" (UID: \"7ffee751-8b25-4777-8623-ced7082ca426\") " pod="openstack/barbican-keystone-listener-58696cc594-f6zjf" Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.232733 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ffee751-8b25-4777-8623-ced7082ca426-combined-ca-bundle\") pod \"barbican-keystone-listener-58696cc594-f6zjf\" (UID: \"7ffee751-8b25-4777-8623-ced7082ca426\") " pod="openstack/barbican-keystone-listener-58696cc594-f6zjf" Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.247112 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7ffee751-8b25-4777-8623-ced7082ca426-logs\") pod \"barbican-keystone-listener-58696cc594-f6zjf\" (UID: \"7ffee751-8b25-4777-8623-ced7082ca426\") " pod="openstack/barbican-keystone-listener-58696cc594-f6zjf" Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.249142 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ffee751-8b25-4777-8623-ced7082ca426-combined-ca-bundle\") pod \"barbican-keystone-listener-58696cc594-f6zjf\" (UID: \"7ffee751-8b25-4777-8623-ced7082ca426\") " pod="openstack/barbican-keystone-listener-58696cc594-f6zjf" Nov 28 11:28:35 crc 
kubenswrapper[4923]: I1128 11:28:35.252895 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5b68211-f537-41d7-9b3d-859764f26575-combined-ca-bundle\") pod \"barbican-worker-fc7d79659-44chg\" (UID: \"a5b68211-f537-41d7-9b3d-859764f26575\") " pod="openstack/barbican-worker-fc7d79659-44chg"
Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.256423 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a5b68211-f537-41d7-9b3d-859764f26575-config-data-custom\") pod \"barbican-worker-fc7d79659-44chg\" (UID: \"a5b68211-f537-41d7-9b3d-859764f26575\") " pod="openstack/barbican-worker-fc7d79659-44chg"
Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.279476 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/7ffee751-8b25-4777-8623-ced7082ca426-config-data-custom\") pod \"barbican-keystone-listener-58696cc594-f6zjf\" (UID: \"7ffee751-8b25-4777-8623-ced7082ca426\") " pod="openstack/barbican-keystone-listener-58696cc594-f6zjf"
Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.283333 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7ffee751-8b25-4777-8623-ced7082ca426-config-data\") pod \"barbican-keystone-listener-58696cc594-f6zjf\" (UID: \"7ffee751-8b25-4777-8623-ced7082ca426\") " pod="openstack/barbican-keystone-listener-58696cc594-f6zjf"
Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.283478 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a5b68211-f537-41d7-9b3d-859764f26575-config-data\") pod \"barbican-worker-fc7d79659-44chg\" (UID: \"a5b68211-f537-41d7-9b3d-859764f26575\") " pod="openstack/barbican-worker-fc7d79659-44chg"
Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.283652 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mkbr5\" (UniqueName: \"kubernetes.io/projected/7ffee751-8b25-4777-8623-ced7082ca426-kube-api-access-mkbr5\") pod \"barbican-keystone-listener-58696cc594-f6zjf\" (UID: \"7ffee751-8b25-4777-8623-ced7082ca426\") " pod="openstack/barbican-keystone-listener-58696cc594-f6zjf"
Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.298509 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2z59m\" (UniqueName: \"kubernetes.io/projected/a5b68211-f537-41d7-9b3d-859764f26575-kube-api-access-2z59m\") pod \"barbican-worker-fc7d79659-44chg\" (UID: \"a5b68211-f537-41d7-9b3d-859764f26575\") " pod="openstack/barbican-worker-fc7d79659-44chg"
Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.334183 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b515b733-f22b-448f-a4e9-ec3caf3cc293-ovsdbserver-nb\") pod \"dnsmasq-dns-6bb684768f-9nr26\" (UID: \"b515b733-f22b-448f-a4e9-ec3caf3cc293\") " pod="openstack/dnsmasq-dns-6bb684768f-9nr26"
Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.334318 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b515b733-f22b-448f-a4e9-ec3caf3cc293-ovsdbserver-sb\") pod \"dnsmasq-dns-6bb684768f-9nr26\" (UID: \"b515b733-f22b-448f-a4e9-ec3caf3cc293\") " pod="openstack/dnsmasq-dns-6bb684768f-9nr26"
Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.334338 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t7jtc\" (UniqueName: \"kubernetes.io/projected/b515b733-f22b-448f-a4e9-ec3caf3cc293-kube-api-access-t7jtc\") pod \"dnsmasq-dns-6bb684768f-9nr26\" (UID: \"b515b733-f22b-448f-a4e9-ec3caf3cc293\") " pod="openstack/dnsmasq-dns-6bb684768f-9nr26"
Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.334394 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b515b733-f22b-448f-a4e9-ec3caf3cc293-dns-svc\") pod \"dnsmasq-dns-6bb684768f-9nr26\" (UID: \"b515b733-f22b-448f-a4e9-ec3caf3cc293\") " pod="openstack/dnsmasq-dns-6bb684768f-9nr26"
Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.334414 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b515b733-f22b-448f-a4e9-ec3caf3cc293-config\") pod \"dnsmasq-dns-6bb684768f-9nr26\" (UID: \"b515b733-f22b-448f-a4e9-ec3caf3cc293\") " pod="openstack/dnsmasq-dns-6bb684768f-9nr26"
Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.389654 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-86dc447b-tv8qs"]
Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.391011 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-86dc447b-tv8qs"
Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.399025 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data"
Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.403500 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-86dc447b-tv8qs"]
Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.429434 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-58696cc594-f6zjf"
Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.435093 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-fc7d79659-44chg"
Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.435587 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b515b733-f22b-448f-a4e9-ec3caf3cc293-ovsdbserver-sb\") pod \"dnsmasq-dns-6bb684768f-9nr26\" (UID: \"b515b733-f22b-448f-a4e9-ec3caf3cc293\") " pod="openstack/dnsmasq-dns-6bb684768f-9nr26"
Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.435618 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t7jtc\" (UniqueName: \"kubernetes.io/projected/b515b733-f22b-448f-a4e9-ec3caf3cc293-kube-api-access-t7jtc\") pod \"dnsmasq-dns-6bb684768f-9nr26\" (UID: \"b515b733-f22b-448f-a4e9-ec3caf3cc293\") " pod="openstack/dnsmasq-dns-6bb684768f-9nr26"
Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.435666 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b515b733-f22b-448f-a4e9-ec3caf3cc293-dns-svc\") pod \"dnsmasq-dns-6bb684768f-9nr26\" (UID: \"b515b733-f22b-448f-a4e9-ec3caf3cc293\") " pod="openstack/dnsmasq-dns-6bb684768f-9nr26"
Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.435684 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b515b733-f22b-448f-a4e9-ec3caf3cc293-config\") pod \"dnsmasq-dns-6bb684768f-9nr26\" (UID: \"b515b733-f22b-448f-a4e9-ec3caf3cc293\") " pod="openstack/dnsmasq-dns-6bb684768f-9nr26"
Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.435717 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b515b733-f22b-448f-a4e9-ec3caf3cc293-ovsdbserver-nb\") pod \"dnsmasq-dns-6bb684768f-9nr26\" (UID: \"b515b733-f22b-448f-a4e9-ec3caf3cc293\") " pod="openstack/dnsmasq-dns-6bb684768f-9nr26"
Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.436474 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b515b733-f22b-448f-a4e9-ec3caf3cc293-dns-svc\") pod \"dnsmasq-dns-6bb684768f-9nr26\" (UID: \"b515b733-f22b-448f-a4e9-ec3caf3cc293\") " pod="openstack/dnsmasq-dns-6bb684768f-9nr26"
Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.436478 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b515b733-f22b-448f-a4e9-ec3caf3cc293-ovsdbserver-nb\") pod \"dnsmasq-dns-6bb684768f-9nr26\" (UID: \"b515b733-f22b-448f-a4e9-ec3caf3cc293\") " pod="openstack/dnsmasq-dns-6bb684768f-9nr26"
Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.437049 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b515b733-f22b-448f-a4e9-ec3caf3cc293-config\") pod \"dnsmasq-dns-6bb684768f-9nr26\" (UID: \"b515b733-f22b-448f-a4e9-ec3caf3cc293\") " pod="openstack/dnsmasq-dns-6bb684768f-9nr26"
Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.437072 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b515b733-f22b-448f-a4e9-ec3caf3cc293-ovsdbserver-sb\") pod \"dnsmasq-dns-6bb684768f-9nr26\" (UID: \"b515b733-f22b-448f-a4e9-ec3caf3cc293\") " pod="openstack/dnsmasq-dns-6bb684768f-9nr26"
Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.485099 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t7jtc\" (UniqueName: \"kubernetes.io/projected/b515b733-f22b-448f-a4e9-ec3caf3cc293-kube-api-access-t7jtc\") pod \"dnsmasq-dns-6bb684768f-9nr26\" (UID: \"b515b733-f22b-448f-a4e9-ec3caf3cc293\") " pod="openstack/dnsmasq-dns-6bb684768f-9nr26"
Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.544687 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/216d256f-0479-4b10-bf92-f59aef3136bc-config-data-custom\") pod \"barbican-api-86dc447b-tv8qs\" (UID: \"216d256f-0479-4b10-bf92-f59aef3136bc\") " pod="openstack/barbican-api-86dc447b-tv8qs"
Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.544736 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/216d256f-0479-4b10-bf92-f59aef3136bc-combined-ca-bundle\") pod \"barbican-api-86dc447b-tv8qs\" (UID: \"216d256f-0479-4b10-bf92-f59aef3136bc\") " pod="openstack/barbican-api-86dc447b-tv8qs"
Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.544777 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jlxr6\" (UniqueName: \"kubernetes.io/projected/216d256f-0479-4b10-bf92-f59aef3136bc-kube-api-access-jlxr6\") pod \"barbican-api-86dc447b-tv8qs\" (UID: \"216d256f-0479-4b10-bf92-f59aef3136bc\") " pod="openstack/barbican-api-86dc447b-tv8qs"
Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.544809 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6bb684768f-9nr26"
Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.544867 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/216d256f-0479-4b10-bf92-f59aef3136bc-logs\") pod \"barbican-api-86dc447b-tv8qs\" (UID: \"216d256f-0479-4b10-bf92-f59aef3136bc\") " pod="openstack/barbican-api-86dc447b-tv8qs"
Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.544887 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/216d256f-0479-4b10-bf92-f59aef3136bc-config-data\") pod \"barbican-api-86dc447b-tv8qs\" (UID: \"216d256f-0479-4b10-bf92-f59aef3136bc\") " pod="openstack/barbican-api-86dc447b-tv8qs"
Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.582916 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-s6twc"
Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.645749 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/216d256f-0479-4b10-bf92-f59aef3136bc-logs\") pod \"barbican-api-86dc447b-tv8qs\" (UID: \"216d256f-0479-4b10-bf92-f59aef3136bc\") " pod="openstack/barbican-api-86dc447b-tv8qs"
Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.645787 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/216d256f-0479-4b10-bf92-f59aef3136bc-config-data\") pod \"barbican-api-86dc447b-tv8qs\" (UID: \"216d256f-0479-4b10-bf92-f59aef3136bc\") " pod="openstack/barbican-api-86dc447b-tv8qs"
Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.645824 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/216d256f-0479-4b10-bf92-f59aef3136bc-config-data-custom\") pod \"barbican-api-86dc447b-tv8qs\" (UID: \"216d256f-0479-4b10-bf92-f59aef3136bc\") " pod="openstack/barbican-api-86dc447b-tv8qs"
Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.645850 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/216d256f-0479-4b10-bf92-f59aef3136bc-combined-ca-bundle\") pod \"barbican-api-86dc447b-tv8qs\" (UID: \"216d256f-0479-4b10-bf92-f59aef3136bc\") " pod="openstack/barbican-api-86dc447b-tv8qs"
Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.645908 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jlxr6\" (UniqueName: \"kubernetes.io/projected/216d256f-0479-4b10-bf92-f59aef3136bc-kube-api-access-jlxr6\") pod \"barbican-api-86dc447b-tv8qs\" (UID: \"216d256f-0479-4b10-bf92-f59aef3136bc\") " pod="openstack/barbican-api-86dc447b-tv8qs"
Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.646555 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/216d256f-0479-4b10-bf92-f59aef3136bc-logs\") pod \"barbican-api-86dc447b-tv8qs\" (UID: \"216d256f-0479-4b10-bf92-f59aef3136bc\") " pod="openstack/barbican-api-86dc447b-tv8qs"
Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.652497 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/216d256f-0479-4b10-bf92-f59aef3136bc-config-data-custom\") pod \"barbican-api-86dc447b-tv8qs\" (UID: \"216d256f-0479-4b10-bf92-f59aef3136bc\") " pod="openstack/barbican-api-86dc447b-tv8qs"
Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.652611 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/216d256f-0479-4b10-bf92-f59aef3136bc-combined-ca-bundle\") pod \"barbican-api-86dc447b-tv8qs\" (UID: \"216d256f-0479-4b10-bf92-f59aef3136bc\") " pod="openstack/barbican-api-86dc447b-tv8qs"
Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.660661 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/216d256f-0479-4b10-bf92-f59aef3136bc-config-data\") pod \"barbican-api-86dc447b-tv8qs\" (UID: \"216d256f-0479-4b10-bf92-f59aef3136bc\") " pod="openstack/barbican-api-86dc447b-tv8qs"
Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.666566 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jlxr6\" (UniqueName: \"kubernetes.io/projected/216d256f-0479-4b10-bf92-f59aef3136bc-kube-api-access-jlxr6\") pod \"barbican-api-86dc447b-tv8qs\" (UID: \"216d256f-0479-4b10-bf92-f59aef3136bc\") " pod="openstack/barbican-api-86dc447b-tv8qs"
Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.713888 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-86dc447b-tv8qs"
Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.747802 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4c7bc447-b1f7-4e68-b0da-310515aecea9-config-data\") pod \"4c7bc447-b1f7-4e68-b0da-310515aecea9\" (UID: \"4c7bc447-b1f7-4e68-b0da-310515aecea9\") "
Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.748244 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/4c7bc447-b1f7-4e68-b0da-310515aecea9-db-sync-config-data\") pod \"4c7bc447-b1f7-4e68-b0da-310515aecea9\" (UID: \"4c7bc447-b1f7-4e68-b0da-310515aecea9\") "
Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.748305 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/4c7bc447-b1f7-4e68-b0da-310515aecea9-etc-machine-id\") pod \"4c7bc447-b1f7-4e68-b0da-310515aecea9\" (UID: \"4c7bc447-b1f7-4e68-b0da-310515aecea9\") "
Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.748364 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4c7bc447-b1f7-4e68-b0da-310515aecea9-scripts\") pod \"4c7bc447-b1f7-4e68-b0da-310515aecea9\" (UID: \"4c7bc447-b1f7-4e68-b0da-310515aecea9\") "
Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.748530 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qldm2\" (UniqueName: \"kubernetes.io/projected/4c7bc447-b1f7-4e68-b0da-310515aecea9-kube-api-access-qldm2\") pod \"4c7bc447-b1f7-4e68-b0da-310515aecea9\" (UID: \"4c7bc447-b1f7-4e68-b0da-310515aecea9\") "
Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.748618 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c7bc447-b1f7-4e68-b0da-310515aecea9-combined-ca-bundle\") pod \"4c7bc447-b1f7-4e68-b0da-310515aecea9\" (UID: \"4c7bc447-b1f7-4e68-b0da-310515aecea9\") "
Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.748663 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4c7bc447-b1f7-4e68-b0da-310515aecea9-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "4c7bc447-b1f7-4e68-b0da-310515aecea9" (UID: "4c7bc447-b1f7-4e68-b0da-310515aecea9"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.749557 4923 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/4c7bc447-b1f7-4e68-b0da-310515aecea9-etc-machine-id\") on node \"crc\" DevicePath \"\""
Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.757161 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c7bc447-b1f7-4e68-b0da-310515aecea9-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "4c7bc447-b1f7-4e68-b0da-310515aecea9" (UID: "4c7bc447-b1f7-4e68-b0da-310515aecea9"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.761256 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4c7bc447-b1f7-4e68-b0da-310515aecea9-kube-api-access-qldm2" (OuterVolumeSpecName: "kube-api-access-qldm2") pod "4c7bc447-b1f7-4e68-b0da-310515aecea9" (UID: "4c7bc447-b1f7-4e68-b0da-310515aecea9"). InnerVolumeSpecName "kube-api-access-qldm2". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.758332 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c7bc447-b1f7-4e68-b0da-310515aecea9-scripts" (OuterVolumeSpecName: "scripts") pod "4c7bc447-b1f7-4e68-b0da-310515aecea9" (UID: "4c7bc447-b1f7-4e68-b0da-310515aecea9"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.816097 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c7bc447-b1f7-4e68-b0da-310515aecea9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4c7bc447-b1f7-4e68-b0da-310515aecea9" (UID: "4c7bc447-b1f7-4e68-b0da-310515aecea9"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.827234 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-s6twc" event={"ID":"4c7bc447-b1f7-4e68-b0da-310515aecea9","Type":"ContainerDied","Data":"89f6e0510e742e0a877dbf339933cfb201113596e86cd286824092f591098acc"}
Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.827449 4923 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="89f6e0510e742e0a877dbf339933cfb201113596e86cd286824092f591098acc"
Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.827506 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-s6twc"
Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.839070 4923 generic.go:334] "Generic (PLEG): container finished" podID="5b557d79-c22c-47a7-b460-d65c25c2bca8" containerID="5dab8389a161c430125def1dacd522708b542950beb371ccc60924982d792314" exitCode=0
Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.839182 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5b557d79-c22c-47a7-b460-d65c25c2bca8","Type":"ContainerDied","Data":"5dab8389a161c430125def1dacd522708b542950beb371ccc60924982d792314"}
Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.860681 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c7bc447-b1f7-4e68-b0da-310515aecea9-config-data" (OuterVolumeSpecName: "config-data") pod "4c7bc447-b1f7-4e68-b0da-310515aecea9" (UID: "4c7bc447-b1f7-4e68-b0da-310515aecea9"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.860796 4923 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4c7bc447-b1f7-4e68-b0da-310515aecea9-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.860826 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qldm2\" (UniqueName: \"kubernetes.io/projected/4c7bc447-b1f7-4e68-b0da-310515aecea9-kube-api-access-qldm2\") on node \"crc\" DevicePath \"\""
Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.860837 4923 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c7bc447-b1f7-4e68-b0da-310515aecea9-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.860847 4923 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/4c7bc447-b1f7-4e68-b0da-310515aecea9-db-sync-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 11:28:35 crc kubenswrapper[4923]: I1128 11:28:35.962652 4923 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4c7bc447-b1f7-4e68-b0da-310515aecea9-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.113767 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"]
Nov 28 11:28:36 crc kubenswrapper[4923]: E1128 11:28:36.114207 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c7bc447-b1f7-4e68-b0da-310515aecea9" containerName="cinder-db-sync"
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.114229 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c7bc447-b1f7-4e68-b0da-310515aecea9" containerName="cinder-db-sync"
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.114416 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="4c7bc447-b1f7-4e68-b0da-310515aecea9" containerName="cinder-db-sync"
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.118014 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0"
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.129028 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-58696cc594-f6zjf"]
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.134283 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data"
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.141228 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"]
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.165795 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d769371b-b401-4423-beb7-fb8b0ac9e6e0-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"d769371b-b401-4423-beb7-fb8b0ac9e6e0\") " pod="openstack/cinder-scheduler-0"
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.165828 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vrrkj\" (UniqueName: \"kubernetes.io/projected/d769371b-b401-4423-beb7-fb8b0ac9e6e0-kube-api-access-vrrkj\") pod \"cinder-scheduler-0\" (UID: \"d769371b-b401-4423-beb7-fb8b0ac9e6e0\") " pod="openstack/cinder-scheduler-0"
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.165857 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d769371b-b401-4423-beb7-fb8b0ac9e6e0-config-data\") pod \"cinder-scheduler-0\" (UID: \"d769371b-b401-4423-beb7-fb8b0ac9e6e0\") " pod="openstack/cinder-scheduler-0"
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.165882 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d769371b-b401-4423-beb7-fb8b0ac9e6e0-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"d769371b-b401-4423-beb7-fb8b0ac9e6e0\") " pod="openstack/cinder-scheduler-0"
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.165913 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d769371b-b401-4423-beb7-fb8b0ac9e6e0-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"d769371b-b401-4423-beb7-fb8b0ac9e6e0\") " pod="openstack/cinder-scheduler-0"
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.165968 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d769371b-b401-4423-beb7-fb8b0ac9e6e0-scripts\") pod \"cinder-scheduler-0\" (UID: \"d769371b-b401-4423-beb7-fb8b0ac9e6e0\") " pod="openstack/cinder-scheduler-0"
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.270006 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d769371b-b401-4423-beb7-fb8b0ac9e6e0-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"d769371b-b401-4423-beb7-fb8b0ac9e6e0\") " pod="openstack/cinder-scheduler-0"
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.270253 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vrrkj\" (UniqueName: \"kubernetes.io/projected/d769371b-b401-4423-beb7-fb8b0ac9e6e0-kube-api-access-vrrkj\") pod \"cinder-scheduler-0\" (UID: \"d769371b-b401-4423-beb7-fb8b0ac9e6e0\") " pod="openstack/cinder-scheduler-0"
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.270290 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d769371b-b401-4423-beb7-fb8b0ac9e6e0-config-data\") pod \"cinder-scheduler-0\" (UID: \"d769371b-b401-4423-beb7-fb8b0ac9e6e0\") " pod="openstack/cinder-scheduler-0"
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.270323 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d769371b-b401-4423-beb7-fb8b0ac9e6e0-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"d769371b-b401-4423-beb7-fb8b0ac9e6e0\") " pod="openstack/cinder-scheduler-0"
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.270385 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d769371b-b401-4423-beb7-fb8b0ac9e6e0-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"d769371b-b401-4423-beb7-fb8b0ac9e6e0\") " pod="openstack/cinder-scheduler-0"
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.270463 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d769371b-b401-4423-beb7-fb8b0ac9e6e0-scripts\") pod \"cinder-scheduler-0\" (UID: \"d769371b-b401-4423-beb7-fb8b0ac9e6e0\") " pod="openstack/cinder-scheduler-0"
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.271025 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d769371b-b401-4423-beb7-fb8b0ac9e6e0-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"d769371b-b401-4423-beb7-fb8b0ac9e6e0\") " pod="openstack/cinder-scheduler-0"
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.277318 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d769371b-b401-4423-beb7-fb8b0ac9e6e0-config-data\") pod \"cinder-scheduler-0\" (UID: \"d769371b-b401-4423-beb7-fb8b0ac9e6e0\") " pod="openstack/cinder-scheduler-0"
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.293145 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d769371b-b401-4423-beb7-fb8b0ac9e6e0-scripts\") pod \"cinder-scheduler-0\" (UID: \"d769371b-b401-4423-beb7-fb8b0ac9e6e0\") " pod="openstack/cinder-scheduler-0"
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.313015 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d769371b-b401-4423-beb7-fb8b0ac9e6e0-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"d769371b-b401-4423-beb7-fb8b0ac9e6e0\") " pod="openstack/cinder-scheduler-0"
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.320704 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d769371b-b401-4423-beb7-fb8b0ac9e6e0-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"d769371b-b401-4423-beb7-fb8b0ac9e6e0\") " pod="openstack/cinder-scheduler-0"
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.330015 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6bb684768f-9nr26"]
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.360332 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vrrkj\" (UniqueName: \"kubernetes.io/projected/d769371b-b401-4423-beb7-fb8b0ac9e6e0-kube-api-access-vrrkj\") pod \"cinder-scheduler-0\" (UID: \"d769371b-b401-4423-beb7-fb8b0ac9e6e0\") " pod="openstack/cinder-scheduler-0"
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.366979 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6bb684768f-9nr26"]
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.411899 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6d97fcdd8f-vr5qb"]
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.413270 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6d97fcdd8f-vr5qb"
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.448581 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-fc7d79659-44chg"]
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.476186 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6d97fcdd8f-vr5qb"]
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.490305 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d1826eb1-8cef-42e9-a892-824c61ad704a-ovsdbserver-sb\") pod \"dnsmasq-dns-6d97fcdd8f-vr5qb\" (UID: \"d1826eb1-8cef-42e9-a892-824c61ad704a\") " pod="openstack/dnsmasq-dns-6d97fcdd8f-vr5qb"
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.490367 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7xvwk\" (UniqueName: \"kubernetes.io/projected/d1826eb1-8cef-42e9-a892-824c61ad704a-kube-api-access-7xvwk\") pod \"dnsmasq-dns-6d97fcdd8f-vr5qb\" (UID: \"d1826eb1-8cef-42e9-a892-824c61ad704a\") " pod="openstack/dnsmasq-dns-6d97fcdd8f-vr5qb"
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.490392 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d1826eb1-8cef-42e9-a892-824c61ad704a-config\") pod \"dnsmasq-dns-6d97fcdd8f-vr5qb\" (UID: \"d1826eb1-8cef-42e9-a892-824c61ad704a\") " pod="openstack/dnsmasq-dns-6d97fcdd8f-vr5qb"
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.490416 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d1826eb1-8cef-42e9-a892-824c61ad704a-dns-svc\") pod \"dnsmasq-dns-6d97fcdd8f-vr5qb\" (UID: \"d1826eb1-8cef-42e9-a892-824c61ad704a\") " pod="openstack/dnsmasq-dns-6d97fcdd8f-vr5qb"
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.490482 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d1826eb1-8cef-42e9-a892-824c61ad704a-ovsdbserver-nb\") pod \"dnsmasq-dns-6d97fcdd8f-vr5qb\" (UID: \"d1826eb1-8cef-42e9-a892-824c61ad704a\") " pod="openstack/dnsmasq-dns-6d97fcdd8f-vr5qb"
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.516566 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0"
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.546237 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.592539 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b557d79-c22c-47a7-b460-d65c25c2bca8-combined-ca-bundle\") pod \"5b557d79-c22c-47a7-b460-d65c25c2bca8\" (UID: \"5b557d79-c22c-47a7-b460-d65c25c2bca8\") "
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.592601 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5b557d79-c22c-47a7-b460-d65c25c2bca8-run-httpd\") pod \"5b557d79-c22c-47a7-b460-d65c25c2bca8\" (UID: \"5b557d79-c22c-47a7-b460-d65c25c2bca8\") "
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.592692 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5b557d79-c22c-47a7-b460-d65c25c2bca8-config-data\") pod \"5b557d79-c22c-47a7-b460-d65c25c2bca8\" (UID: \"5b557d79-c22c-47a7-b460-d65c25c2bca8\") "
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.592721 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5b557d79-c22c-47a7-b460-d65c25c2bca8-scripts\") pod \"5b557d79-c22c-47a7-b460-d65c25c2bca8\" (UID: \"5b557d79-c22c-47a7-b460-d65c25c2bca8\") "
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.592745 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5b557d79-c22c-47a7-b460-d65c25c2bca8-sg-core-conf-yaml\") pod \"5b557d79-c22c-47a7-b460-d65c25c2bca8\" (UID: \"5b557d79-c22c-47a7-b460-d65c25c2bca8\") "
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.592769 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4dlbl\" (UniqueName: \"kubernetes.io/projected/5b557d79-c22c-47a7-b460-d65c25c2bca8-kube-api-access-4dlbl\") pod \"5b557d79-c22c-47a7-b460-d65c25c2bca8\" (UID: \"5b557d79-c22c-47a7-b460-d65c25c2bca8\") "
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.592795 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5b557d79-c22c-47a7-b460-d65c25c2bca8-log-httpd\") pod \"5b557d79-c22c-47a7-b460-d65c25c2bca8\" (UID: \"5b557d79-c22c-47a7-b460-d65c25c2bca8\") "
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.593022 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d1826eb1-8cef-42e9-a892-824c61ad704a-ovsdbserver-nb\") pod \"dnsmasq-dns-6d97fcdd8f-vr5qb\" (UID: \"d1826eb1-8cef-42e9-a892-824c61ad704a\") " pod="openstack/dnsmasq-dns-6d97fcdd8f-vr5qb"
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.593089 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d1826eb1-8cef-42e9-a892-824c61ad704a-ovsdbserver-sb\") pod \"dnsmasq-dns-6d97fcdd8f-vr5qb\" (UID: \"d1826eb1-8cef-42e9-a892-824c61ad704a\") " pod="openstack/dnsmasq-dns-6d97fcdd8f-vr5qb"
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.593126 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7xvwk\" (UniqueName: \"kubernetes.io/projected/d1826eb1-8cef-42e9-a892-824c61ad704a-kube-api-access-7xvwk\") pod \"dnsmasq-dns-6d97fcdd8f-vr5qb\" (UID: \"d1826eb1-8cef-42e9-a892-824c61ad704a\") " pod="openstack/dnsmasq-dns-6d97fcdd8f-vr5qb"
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.593150 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d1826eb1-8cef-42e9-a892-824c61ad704a-config\") pod \"dnsmasq-dns-6d97fcdd8f-vr5qb\" (UID: \"d1826eb1-8cef-42e9-a892-824c61ad704a\") " pod="openstack/dnsmasq-dns-6d97fcdd8f-vr5qb"
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.593178 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d1826eb1-8cef-42e9-a892-824c61ad704a-dns-svc\") pod \"dnsmasq-dns-6d97fcdd8f-vr5qb\" (UID: \"d1826eb1-8cef-42e9-a892-824c61ad704a\") " pod="openstack/dnsmasq-dns-6d97fcdd8f-vr5qb"
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.594003 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d1826eb1-8cef-42e9-a892-824c61ad704a-dns-svc\") pod \"dnsmasq-dns-6d97fcdd8f-vr5qb\" (UID: \"d1826eb1-8cef-42e9-a892-824c61ad704a\") " pod="openstack/dnsmasq-dns-6d97fcdd8f-vr5qb"
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.596043 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d1826eb1-8cef-42e9-a892-824c61ad704a-ovsdbserver-sb\") pod \"dnsmasq-dns-6d97fcdd8f-vr5qb\" (UID: \"d1826eb1-8cef-42e9-a892-824c61ad704a\") " pod="openstack/dnsmasq-dns-6d97fcdd8f-vr5qb"
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.596263 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5b557d79-c22c-47a7-b460-d65c25c2bca8-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "5b557d79-c22c-47a7-b460-d65c25c2bca8" (UID: "5b557d79-c22c-47a7-b460-d65c25c2bca8"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.596367 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d1826eb1-8cef-42e9-a892-824c61ad704a-config\") pod \"dnsmasq-dns-6d97fcdd8f-vr5qb\" (UID: \"d1826eb1-8cef-42e9-a892-824c61ad704a\") " pod="openstack/dnsmasq-dns-6d97fcdd8f-vr5qb"
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.599805 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5b557d79-c22c-47a7-b460-d65c25c2bca8-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "5b557d79-c22c-47a7-b460-d65c25c2bca8" (UID: "5b557d79-c22c-47a7-b460-d65c25c2bca8"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.600729 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d1826eb1-8cef-42e9-a892-824c61ad704a-ovsdbserver-nb\") pod \"dnsmasq-dns-6d97fcdd8f-vr5qb\" (UID: \"d1826eb1-8cef-42e9-a892-824c61ad704a\") " pod="openstack/dnsmasq-dns-6d97fcdd8f-vr5qb"
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.608176 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b557d79-c22c-47a7-b460-d65c25c2bca8-scripts" (OuterVolumeSpecName: "scripts") pod "5b557d79-c22c-47a7-b460-d65c25c2bca8" (UID: "5b557d79-c22c-47a7-b460-d65c25c2bca8"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.618748 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b557d79-c22c-47a7-b460-d65c25c2bca8-kube-api-access-4dlbl" (OuterVolumeSpecName: "kube-api-access-4dlbl") pod "5b557d79-c22c-47a7-b460-d65c25c2bca8" (UID: "5b557d79-c22c-47a7-b460-d65c25c2bca8"). InnerVolumeSpecName "kube-api-access-4dlbl". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.649553 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7xvwk\" (UniqueName: \"kubernetes.io/projected/d1826eb1-8cef-42e9-a892-824c61ad704a-kube-api-access-7xvwk\") pod \"dnsmasq-dns-6d97fcdd8f-vr5qb\" (UID: \"d1826eb1-8cef-42e9-a892-824c61ad704a\") " pod="openstack/dnsmasq-dns-6d97fcdd8f-vr5qb"
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.675728 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-86dc447b-tv8qs"]
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.693302 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"]
Nov 28 11:28:36 crc kubenswrapper[4923]: E1128 11:28:36.693629 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b557d79-c22c-47a7-b460-d65c25c2bca8" containerName="ceilometer-notification-agent"
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.693642 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b557d79-c22c-47a7-b460-d65c25c2bca8" containerName="ceilometer-notification-agent"
Nov 28 11:28:36 crc kubenswrapper[4923]: E1128 11:28:36.693653 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b557d79-c22c-47a7-b460-d65c25c2bca8" containerName="ceilometer-central-agent"
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.693660 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b557d79-c22c-47a7-b460-d65c25c2bca8" containerName="ceilometer-central-agent"
Nov 28 11:28:36 crc kubenswrapper[4923]: E1128 11:28:36.693684 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b557d79-c22c-47a7-b460-d65c25c2bca8" containerName="proxy-httpd"
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.693690 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b557d79-c22c-47a7-b460-d65c25c2bca8" containerName="proxy-httpd"
Nov 28 11:28:36 crc kubenswrapper[4923]: E1128 11:28:36.693700 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b557d79-c22c-47a7-b460-d65c25c2bca8" containerName="sg-core"
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.693705 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b557d79-c22c-47a7-b460-d65c25c2bca8" containerName="sg-core"
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.693852 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="5b557d79-c22c-47a7-b460-d65c25c2bca8" containerName="ceilometer-notification-agent"
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.693873 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="5b557d79-c22c-47a7-b460-d65c25c2bca8" containerName="ceilometer-central-agent"
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.693896 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="5b557d79-c22c-47a7-b460-d65c25c2bca8" containerName="proxy-httpd"
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.693904 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="5b557d79-c22c-47a7-b460-d65c25c2bca8" containerName="sg-core"
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.694739 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0"
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.698760 4923 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5b557d79-c22c-47a7-b460-d65c25c2bca8-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.698783 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4dlbl\" (UniqueName: \"kubernetes.io/projected/5b557d79-c22c-47a7-b460-d65c25c2bca8-kube-api-access-4dlbl\") on node \"crc\" DevicePath \"\""
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.698794 4923 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5b557d79-c22c-47a7-b460-d65c25c2bca8-log-httpd\") on node \"crc\" DevicePath \"\""
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.698803 4923 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5b557d79-c22c-47a7-b460-d65c25c2bca8-run-httpd\") on node \"crc\" DevicePath \"\""
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.713753 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data"
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.730447 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"]
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.788224 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b557d79-c22c-47a7-b460-d65c25c2bca8-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "5b557d79-c22c-47a7-b460-d65c25c2bca8" (UID: "5b557d79-c22c-47a7-b460-d65c25c2bca8"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.799567 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0577bf3a-49d8-4540-92f3-fa1703570c2d-config-data\") pod \"cinder-api-0\" (UID: \"0577bf3a-49d8-4540-92f3-fa1703570c2d\") " pod="openstack/cinder-api-0"
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.799611 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x6kx5\" (UniqueName: \"kubernetes.io/projected/0577bf3a-49d8-4540-92f3-fa1703570c2d-kube-api-access-x6kx5\") pod \"cinder-api-0\" (UID: \"0577bf3a-49d8-4540-92f3-fa1703570c2d\") " pod="openstack/cinder-api-0"
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.799671 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0577bf3a-49d8-4540-92f3-fa1703570c2d-scripts\") pod \"cinder-api-0\" (UID: \"0577bf3a-49d8-4540-92f3-fa1703570c2d\") " pod="openstack/cinder-api-0"
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.799689 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0577bf3a-49d8-4540-92f3-fa1703570c2d-etc-machine-id\") pod \"cinder-api-0\" (UID: \"0577bf3a-49d8-4540-92f3-fa1703570c2d\") " pod="openstack/cinder-api-0"
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.799742 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0577bf3a-49d8-4540-92f3-fa1703570c2d-logs\") pod \"cinder-api-0\" (UID: \"0577bf3a-49d8-4540-92f3-fa1703570c2d\") " pod="openstack/cinder-api-0"
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.799762 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0577bf3a-49d8-4540-92f3-fa1703570c2d-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"0577bf3a-49d8-4540-92f3-fa1703570c2d\") " pod="openstack/cinder-api-0"
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.799776 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0577bf3a-49d8-4540-92f3-fa1703570c2d-config-data-custom\") pod \"cinder-api-0\" (UID: \"0577bf3a-49d8-4540-92f3-fa1703570c2d\") " pod="openstack/cinder-api-0"
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.799818 4923 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5b557d79-c22c-47a7-b460-d65c25c2bca8-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.849291 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6d97fcdd8f-vr5qb"
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.890106 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-58696cc594-f6zjf" event={"ID":"7ffee751-8b25-4777-8623-ced7082ca426","Type":"ContainerStarted","Data":"512ff8767a468d5ee03c77bade690e2782aa4510a359c9955beab6c72b09b725"}
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.892180 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-57876647b-g86pl"
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.905905 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0577bf3a-49d8-4540-92f3-fa1703570c2d-config-data\") pod \"cinder-api-0\" (UID: \"0577bf3a-49d8-4540-92f3-fa1703570c2d\") " pod="openstack/cinder-api-0"
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.905971 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x6kx5\" (UniqueName: \"kubernetes.io/projected/0577bf3a-49d8-4540-92f3-fa1703570c2d-kube-api-access-x6kx5\") pod \"cinder-api-0\" (UID: \"0577bf3a-49d8-4540-92f3-fa1703570c2d\") " pod="openstack/cinder-api-0"
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.906034 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0577bf3a-49d8-4540-92f3-fa1703570c2d-scripts\") pod \"cinder-api-0\" (UID: \"0577bf3a-49d8-4540-92f3-fa1703570c2d\") " pod="openstack/cinder-api-0"
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.906055 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0577bf3a-49d8-4540-92f3-fa1703570c2d-etc-machine-id\") pod \"cinder-api-0\" (UID: \"0577bf3a-49d8-4540-92f3-fa1703570c2d\") " pod="openstack/cinder-api-0"
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.906109 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0577bf3a-49d8-4540-92f3-fa1703570c2d-logs\") pod \"cinder-api-0\" (UID: \"0577bf3a-49d8-4540-92f3-fa1703570c2d\") " pod="openstack/cinder-api-0"
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.906127 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0577bf3a-49d8-4540-92f3-fa1703570c2d-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"0577bf3a-49d8-4540-92f3-fa1703570c2d\") " pod="openstack/cinder-api-0"
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.906144 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0577bf3a-49d8-4540-92f3-fa1703570c2d-config-data-custom\") pod \"cinder-api-0\" (UID: \"0577bf3a-49d8-4540-92f3-fa1703570c2d\") " pod="openstack/cinder-api-0"
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.907192 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0577bf3a-49d8-4540-92f3-fa1703570c2d-logs\") pod \"cinder-api-0\" (UID: \"0577bf3a-49d8-4540-92f3-fa1703570c2d\") " pod="openstack/cinder-api-0"
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.907252 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0577bf3a-49d8-4540-92f3-fa1703570c2d-etc-machine-id\") pod \"cinder-api-0\" (UID: \"0577bf3a-49d8-4540-92f3-fa1703570c2d\") " pod="openstack/cinder-api-0"
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.929160 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0577bf3a-49d8-4540-92f3-fa1703570c2d-scripts\") pod \"cinder-api-0\" (UID: \"0577bf3a-49d8-4540-92f3-fa1703570c2d\") " pod="openstack/cinder-api-0"
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.929715 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0577bf3a-49d8-4540-92f3-fa1703570c2d-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"0577bf3a-49d8-4540-92f3-fa1703570c2d\") " pod="openstack/cinder-api-0"
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.934942 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0577bf3a-49d8-4540-92f3-fa1703570c2d-config-data-custom\") pod \"cinder-api-0\" (UID: \"0577bf3a-49d8-4540-92f3-fa1703570c2d\") " pod="openstack/cinder-api-0"
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.936124 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0577bf3a-49d8-4540-92f3-fa1703570c2d-config-data\") pod \"cinder-api-0\" (UID: \"0577bf3a-49d8-4540-92f3-fa1703570c2d\") " pod="openstack/cinder-api-0"
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.941505 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x6kx5\" (UniqueName: \"kubernetes.io/projected/0577bf3a-49d8-4540-92f3-fa1703570c2d-kube-api-access-x6kx5\") pod \"cinder-api-0\" (UID: \"0577bf3a-49d8-4540-92f3-fa1703570c2d\") " pod="openstack/cinder-api-0"
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.954173 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-86dc447b-tv8qs" event={"ID":"216d256f-0479-4b10-bf92-f59aef3136bc","Type":"ContainerStarted","Data":"6a5a60f6a063d10036cb0a515ccd528686b2aa9ab2f427716f164cf121791545"}
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.977831 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5b557d79-c22c-47a7-b460-d65c25c2bca8","Type":"ContainerDied","Data":"dcff9cdb1a48423ca510ed75fc200d4c1f20c04e74ca02ec6d7b1627c5ec8ed4"}
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.977882 4923 scope.go:117] "RemoveContainer" containerID="212701849a5d6cd056cc743aec0309c38557296fdc86c14e28dcbd8bd58dab47"
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.978014 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 28 11:28:36 crc kubenswrapper[4923]: I1128 11:28:36.982044 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b557d79-c22c-47a7-b460-d65c25c2bca8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5b557d79-c22c-47a7-b460-d65c25c2bca8" (UID: "5b557d79-c22c-47a7-b460-d65c25c2bca8"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 11:28:37 crc kubenswrapper[4923]: I1128 11:28:37.004490 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-fc7d79659-44chg" event={"ID":"a5b68211-f537-41d7-9b3d-859764f26575","Type":"ContainerStarted","Data":"1e92a5198a998a00bf5e4f10cb12b9f76c70d689d918068563d6efba286cba2b"}
Nov 28 11:28:37 crc kubenswrapper[4923]: I1128 11:28:37.008403 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bb684768f-9nr26" event={"ID":"b515b733-f22b-448f-a4e9-ec3caf3cc293","Type":"ContainerStarted","Data":"c6b0001a566106761c3206495d05d2d61039bc66231180db5b6250f60d2d4a13"}
Nov 28 11:28:37 crc kubenswrapper[4923]: I1128 11:28:37.014668 4923 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b557d79-c22c-47a7-b460-d65c25c2bca8-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 11:28:37 crc kubenswrapper[4923]: I1128 11:28:37.027995 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b557d79-c22c-47a7-b460-d65c25c2bca8-config-data" (OuterVolumeSpecName: "config-data") pod "5b557d79-c22c-47a7-b460-d65c25c2bca8" (UID: "5b557d79-c22c-47a7-b460-d65c25c2bca8"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 11:28:37 crc kubenswrapper[4923]: I1128 11:28:37.060574 4923 scope.go:117] "RemoveContainer" containerID="3e2c47547dc2cf28a7e6a39d573c710dfad60bc6c0d9c326c56c5ca481a4f7ea"
Nov 28 11:28:37 crc kubenswrapper[4923]: I1128 11:28:37.103424 4923 scope.go:117] "RemoveContainer" containerID="5dab8389a161c430125def1dacd522708b542950beb371ccc60924982d792314"
Nov 28 11:28:37 crc kubenswrapper[4923]: I1128 11:28:37.112714 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0"
Nov 28 11:28:37 crc kubenswrapper[4923]: I1128 11:28:37.119760 4923 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5b557d79-c22c-47a7-b460-d65c25c2bca8-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 11:28:37 crc kubenswrapper[4923]: I1128 11:28:37.166741 4923 scope.go:117] "RemoveContainer" containerID="9e974fec2fa94132da5509cd0d195f51c5708539f7a12beba354675cbebf9583"
Nov 28 11:28:37 crc kubenswrapper[4923]: I1128 11:28:37.360006 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"]
Nov 28 11:28:37 crc kubenswrapper[4923]: I1128 11:28:37.429030 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 28 11:28:37 crc kubenswrapper[4923]: I1128 11:28:37.451195 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Nov 28 11:28:37 crc kubenswrapper[4923]: I1128 11:28:37.465885 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Nov 28 11:28:37 crc kubenswrapper[4923]: I1128 11:28:37.470794 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 28 11:28:37 crc kubenswrapper[4923]: I1128 11:28:37.472422 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 28 11:28:37 crc kubenswrapper[4923]: I1128 11:28:37.472433 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Nov 28 11:28:37 crc kubenswrapper[4923]: I1128 11:28:37.473148 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Nov 28 11:28:37 crc kubenswrapper[4923]: I1128 11:28:37.533236 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9bafb2b1-e8b0-458d-a6c9-e18b31b73c53-run-httpd\") pod \"ceilometer-0\" (UID: \"9bafb2b1-e8b0-458d-a6c9-e18b31b73c53\") " pod="openstack/ceilometer-0"
Nov 28 11:28:37 crc kubenswrapper[4923]: I1128 11:28:37.533602 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9bafb2b1-e8b0-458d-a6c9-e18b31b73c53-config-data\") pod \"ceilometer-0\" (UID: \"9bafb2b1-e8b0-458d-a6c9-e18b31b73c53\") " pod="openstack/ceilometer-0"
Nov 28 11:28:37 crc kubenswrapper[4923]: I1128 11:28:37.533646 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9bafb2b1-e8b0-458d-a6c9-e18b31b73c53-scripts\") pod \"ceilometer-0\" (UID: \"9bafb2b1-e8b0-458d-a6c9-e18b31b73c53\") " pod="openstack/ceilometer-0"
Nov 28 11:28:37 crc kubenswrapper[4923]: I1128 11:28:37.533687 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9bafb2b1-e8b0-458d-a6c9-e18b31b73c53-log-httpd\") pod \"ceilometer-0\" (UID: \"9bafb2b1-e8b0-458d-a6c9-e18b31b73c53\") " pod="openstack/ceilometer-0"
Nov 28 11:28:37 crc kubenswrapper[4923]: I1128 11:28:37.533774 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9bafb2b1-e8b0-458d-a6c9-e18b31b73c53-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"9bafb2b1-e8b0-458d-a6c9-e18b31b73c53\") " pod="openstack/ceilometer-0"
Nov 28 11:28:37 crc kubenswrapper[4923]: I1128 11:28:37.533853 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9bafb2b1-e8b0-458d-a6c9-e18b31b73c53-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"9bafb2b1-e8b0-458d-a6c9-e18b31b73c53\") " pod="openstack/ceilometer-0"
Nov 28 11:28:37 crc kubenswrapper[4923]: I1128 11:28:37.533885 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tbzlk\" (UniqueName: \"kubernetes.io/projected/9bafb2b1-e8b0-458d-a6c9-e18b31b73c53-kube-api-access-tbzlk\") pod \"ceilometer-0\" (UID: \"9bafb2b1-e8b0-458d-a6c9-e18b31b73c53\") " pod="openstack/ceilometer-0"
Nov 28 11:28:37 crc kubenswrapper[4923]: I1128 11:28:37.597753 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6d97fcdd8f-vr5qb"]
Nov 28 11:28:37 crc kubenswrapper[4923]: I1128 11:28:37.639769 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9bafb2b1-e8b0-458d-a6c9-e18b31b73c53-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"9bafb2b1-e8b0-458d-a6c9-e18b31b73c53\") " pod="openstack/ceilometer-0"
Nov 28 11:28:37 crc kubenswrapper[4923]: I1128 11:28:37.639808 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tbzlk\" (UniqueName: \"kubernetes.io/projected/9bafb2b1-e8b0-458d-a6c9-e18b31b73c53-kube-api-access-tbzlk\") pod \"ceilometer-0\" (UID: \"9bafb2b1-e8b0-458d-a6c9-e18b31b73c53\") " pod="openstack/ceilometer-0"
Nov 28 11:28:37 crc kubenswrapper[4923]: I1128 11:28:37.639863 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9bafb2b1-e8b0-458d-a6c9-e18b31b73c53-run-httpd\") pod \"ceilometer-0\" (UID: \"9bafb2b1-e8b0-458d-a6c9-e18b31b73c53\") " pod="openstack/ceilometer-0"
Nov 28 11:28:37 crc kubenswrapper[4923]: I1128 11:28:37.639887 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9bafb2b1-e8b0-458d-a6c9-e18b31b73c53-config-data\") pod \"ceilometer-0\" (UID: \"9bafb2b1-e8b0-458d-a6c9-e18b31b73c53\") " pod="openstack/ceilometer-0"
Nov 28 11:28:37 crc kubenswrapper[4923]: I1128 11:28:37.639904 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9bafb2b1-e8b0-458d-a6c9-e18b31b73c53-scripts\") pod \"ceilometer-0\" (UID: \"9bafb2b1-e8b0-458d-a6c9-e18b31b73c53\") " pod="openstack/ceilometer-0"
Nov 28 11:28:37 crc kubenswrapper[4923]: I1128 11:28:37.639980 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9bafb2b1-e8b0-458d-a6c9-e18b31b73c53-log-httpd\") pod \"ceilometer-0\" (UID: \"9bafb2b1-e8b0-458d-a6c9-e18b31b73c53\") " pod="openstack/ceilometer-0"
Nov 28 11:28:37 crc kubenswrapper[4923]: I1128 11:28:37.640023 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9bafb2b1-e8b0-458d-a6c9-e18b31b73c53-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"9bafb2b1-e8b0-458d-a6c9-e18b31b73c53\") " pod="openstack/ceilometer-0"
Nov 28 11:28:37 crc kubenswrapper[4923]: I1128 11:28:37.640333 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9bafb2b1-e8b0-458d-a6c9-e18b31b73c53-run-httpd\") pod \"ceilometer-0\" (UID: \"9bafb2b1-e8b0-458d-a6c9-e18b31b73c53\") " pod="openstack/ceilometer-0"
Nov 28 11:28:37 crc kubenswrapper[4923]: I1128 11:28:37.646384 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9bafb2b1-e8b0-458d-a6c9-e18b31b73c53-log-httpd\") pod \"ceilometer-0\" (UID: \"9bafb2b1-e8b0-458d-a6c9-e18b31b73c53\") " pod="openstack/ceilometer-0"
Nov 28 11:28:37 crc kubenswrapper[4923]: I1128 11:28:37.651279 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9bafb2b1-e8b0-458d-a6c9-e18b31b73c53-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"9bafb2b1-e8b0-458d-a6c9-e18b31b73c53\") " pod="openstack/ceilometer-0"
Nov 28 11:28:37 crc kubenswrapper[4923]: I1128 11:28:37.653540 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9bafb2b1-e8b0-458d-a6c9-e18b31b73c53-scripts\") pod \"ceilometer-0\" (UID: \"9bafb2b1-e8b0-458d-a6c9-e18b31b73c53\") " pod="openstack/ceilometer-0"
Nov 28 11:28:37 crc kubenswrapper[4923]: I1128 11:28:37.654965 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9bafb2b1-e8b0-458d-a6c9-e18b31b73c53-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"9bafb2b1-e8b0-458d-a6c9-e18b31b73c53\") " pod="openstack/ceilometer-0"
Nov 28 11:28:37 crc kubenswrapper[4923]: I1128 11:28:37.654999 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9bafb2b1-e8b0-458d-a6c9-e18b31b73c53-config-data\") pod \"ceilometer-0\" (UID: \"9bafb2b1-e8b0-458d-a6c9-e18b31b73c53\") " pod="openstack/ceilometer-0"
Nov 28 11:28:37 crc kubenswrapper[4923]: I1128 11:28:37.665223 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tbzlk\" (UniqueName: \"kubernetes.io/projected/9bafb2b1-e8b0-458d-a6c9-e18b31b73c53-kube-api-access-tbzlk\") pod \"ceilometer-0\" (UID: \"9bafb2b1-e8b0-458d-a6c9-e18b31b73c53\") " pod="openstack/ceilometer-0"
Nov 28 11:28:37 crc kubenswrapper[4923]: I1128 11:28:37.798363 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 28 11:28:37 crc kubenswrapper[4923]: I1128 11:28:37.799275 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"]
Nov 28 11:28:38 crc kubenswrapper[4923]: I1128 11:28:38.027789 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"d769371b-b401-4423-beb7-fb8b0ac9e6e0","Type":"ContainerStarted","Data":"69a9e7b824e0ab0f83e0f89dba19aa97a140a10c293b3616ab68d13d10ab924a"}
Nov 28 11:28:38 crc kubenswrapper[4923]: I1128 11:28:38.030187 4923 generic.go:334] "Generic (PLEG): container finished" podID="d1826eb1-8cef-42e9-a892-824c61ad704a" containerID="22393a4db8df8b155ef6d78ae071068e8b4bfb9b1ae8dc0f5e137eae1701648a" exitCode=0
Nov 28 11:28:38 crc kubenswrapper[4923]: I1128 11:28:38.030230 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d97fcdd8f-vr5qb" event={"ID":"d1826eb1-8cef-42e9-a892-824c61ad704a","Type":"ContainerDied","Data":"22393a4db8df8b155ef6d78ae071068e8b4bfb9b1ae8dc0f5e137eae1701648a"}
Nov 28 11:28:38 crc kubenswrapper[4923]: I1128 11:28:38.030245 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d97fcdd8f-vr5qb" event={"ID":"d1826eb1-8cef-42e9-a892-824c61ad704a","Type":"ContainerStarted","Data":"f8b71dc55eab5bd398e889652a356b311164431fe5eba6eff4a3e4d89457143f"}
Nov 28 11:28:38 crc kubenswrapper[4923]: I1128 11:28:38.042010 4923 generic.go:334] "Generic (PLEG): container finished" podID="b515b733-f22b-448f-a4e9-ec3caf3cc293" containerID="558957f69bd79aec06a9e99d2aca5a6a5693ad37e041404f0ee7e8e8551bfa47" exitCode=0
Nov 28 11:28:38 crc kubenswrapper[4923]: I1128 11:28:38.042079 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bb684768f-9nr26" event={"ID":"b515b733-f22b-448f-a4e9-ec3caf3cc293","Type":"ContainerDied","Data":"558957f69bd79aec06a9e99d2aca5a6a5693ad37e041404f0ee7e8e8551bfa47"}
Nov 28 11:28:38 crc kubenswrapper[4923]: I1128 11:28:38.045383 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0"
event={"ID":"0577bf3a-49d8-4540-92f3-fa1703570c2d","Type":"ContainerStarted","Data":"c26f8607f1a279ccb2ade3e7bd89c6d1aeeed0581d80b936cc24ba4736a2aa81"} Nov 28 11:28:38 crc kubenswrapper[4923]: I1128 11:28:38.053554 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-86dc447b-tv8qs" event={"ID":"216d256f-0479-4b10-bf92-f59aef3136bc","Type":"ContainerStarted","Data":"4ad8877683db2431c87adaea4885f969af2b0eeb49a3d655c2a2c8ffd67d706c"} Nov 28 11:28:38 crc kubenswrapper[4923]: I1128 11:28:38.053595 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-86dc447b-tv8qs" event={"ID":"216d256f-0479-4b10-bf92-f59aef3136bc","Type":"ContainerStarted","Data":"1182aaafabe027a8ccbdf4064f9d7a64ba5036316d416faf774b1d223fabd706"} Nov 28 11:28:38 crc kubenswrapper[4923]: I1128 11:28:38.054337 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-86dc447b-tv8qs" Nov 28 11:28:38 crc kubenswrapper[4923]: I1128 11:28:38.054361 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-86dc447b-tv8qs" Nov 28 11:28:38 crc kubenswrapper[4923]: I1128 11:28:38.100147 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-86dc447b-tv8qs" podStartSLOduration=3.100131128 podStartE2EDuration="3.100131128s" podCreationTimestamp="2025-11-28 11:28:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:28:38.086068334 +0000 UTC m=+1197.214752544" watchObservedRunningTime="2025-11-28 11:28:38.100131128 +0000 UTC m=+1197.228815338" Nov 28 11:28:38 crc kubenswrapper[4923]: I1128 11:28:38.316345 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 11:28:38 crc kubenswrapper[4923]: I1128 11:28:38.501484 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6bb684768f-9nr26" Nov 28 11:28:38 crc kubenswrapper[4923]: I1128 11:28:38.558188 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t7jtc\" (UniqueName: \"kubernetes.io/projected/b515b733-f22b-448f-a4e9-ec3caf3cc293-kube-api-access-t7jtc\") pod \"b515b733-f22b-448f-a4e9-ec3caf3cc293\" (UID: \"b515b733-f22b-448f-a4e9-ec3caf3cc293\") " Nov 28 11:28:38 crc kubenswrapper[4923]: I1128 11:28:38.558334 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b515b733-f22b-448f-a4e9-ec3caf3cc293-config\") pod \"b515b733-f22b-448f-a4e9-ec3caf3cc293\" (UID: \"b515b733-f22b-448f-a4e9-ec3caf3cc293\") " Nov 28 11:28:38 crc kubenswrapper[4923]: I1128 11:28:38.558407 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b515b733-f22b-448f-a4e9-ec3caf3cc293-dns-svc\") pod \"b515b733-f22b-448f-a4e9-ec3caf3cc293\" (UID: \"b515b733-f22b-448f-a4e9-ec3caf3cc293\") " Nov 28 11:28:38 crc kubenswrapper[4923]: I1128 11:28:38.558457 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b515b733-f22b-448f-a4e9-ec3caf3cc293-ovsdbserver-sb\") pod \"b515b733-f22b-448f-a4e9-ec3caf3cc293\" (UID: \"b515b733-f22b-448f-a4e9-ec3caf3cc293\") " Nov 28 11:28:38 crc kubenswrapper[4923]: I1128 11:28:38.558489 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b515b733-f22b-448f-a4e9-ec3caf3cc293-ovsdbserver-nb\") pod \"b515b733-f22b-448f-a4e9-ec3caf3cc293\" (UID: \"b515b733-f22b-448f-a4e9-ec3caf3cc293\") " Nov 28 11:28:38 crc kubenswrapper[4923]: I1128 11:28:38.566361 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b515b733-f22b-448f-a4e9-ec3caf3cc293-kube-api-access-t7jtc" (OuterVolumeSpecName: "kube-api-access-t7jtc") pod "b515b733-f22b-448f-a4e9-ec3caf3cc293" (UID: "b515b733-f22b-448f-a4e9-ec3caf3cc293"). InnerVolumeSpecName "kube-api-access-t7jtc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:28:38 crc kubenswrapper[4923]: I1128 11:28:38.595723 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b515b733-f22b-448f-a4e9-ec3caf3cc293-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "b515b733-f22b-448f-a4e9-ec3caf3cc293" (UID: "b515b733-f22b-448f-a4e9-ec3caf3cc293"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:28:38 crc kubenswrapper[4923]: I1128 11:28:38.596483 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b515b733-f22b-448f-a4e9-ec3caf3cc293-config" (OuterVolumeSpecName: "config") pod "b515b733-f22b-448f-a4e9-ec3caf3cc293" (UID: "b515b733-f22b-448f-a4e9-ec3caf3cc293"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:28:38 crc kubenswrapper[4923]: I1128 11:28:38.596582 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b515b733-f22b-448f-a4e9-ec3caf3cc293-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "b515b733-f22b-448f-a4e9-ec3caf3cc293" (UID: "b515b733-f22b-448f-a4e9-ec3caf3cc293"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:28:38 crc kubenswrapper[4923]: I1128 11:28:38.622967 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b515b733-f22b-448f-a4e9-ec3caf3cc293-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "b515b733-f22b-448f-a4e9-ec3caf3cc293" (UID: "b515b733-f22b-448f-a4e9-ec3caf3cc293"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:28:38 crc kubenswrapper[4923]: I1128 11:28:38.660418 4923 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b515b733-f22b-448f-a4e9-ec3caf3cc293-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 11:28:38 crc kubenswrapper[4923]: I1128 11:28:38.660444 4923 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b515b733-f22b-448f-a4e9-ec3caf3cc293-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 11:28:38 crc kubenswrapper[4923]: I1128 11:28:38.660697 4923 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b515b733-f22b-448f-a4e9-ec3caf3cc293-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 11:28:38 crc kubenswrapper[4923]: I1128 11:28:38.660834 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t7jtc\" (UniqueName: \"kubernetes.io/projected/b515b733-f22b-448f-a4e9-ec3caf3cc293-kube-api-access-t7jtc\") on node \"crc\" DevicePath \"\"" Nov 28 11:28:38 crc kubenswrapper[4923]: I1128 11:28:38.660852 4923 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b515b733-f22b-448f-a4e9-ec3caf3cc293-config\") on node \"crc\" DevicePath \"\"" Nov 28 11:28:39 crc kubenswrapper[4923]: I1128 11:28:39.076233 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d97fcdd8f-vr5qb" event={"ID":"d1826eb1-8cef-42e9-a892-824c61ad704a","Type":"ContainerStarted","Data":"81b62b488e165ea53f1fa56cc2b9e7bec2d122cc2c430235b672550bab1954cf"} Nov 28 11:28:39 crc kubenswrapper[4923]: I1128 11:28:39.076687 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6d97fcdd8f-vr5qb" Nov 28 11:28:39 crc kubenswrapper[4923]: I1128 11:28:39.078240 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bb684768f-9nr26" event={"ID":"b515b733-f22b-448f-a4e9-ec3caf3cc293","Type":"ContainerDied","Data":"c6b0001a566106761c3206495d05d2d61039bc66231180db5b6250f60d2d4a13"} Nov 28 11:28:39 crc kubenswrapper[4923]: I1128 11:28:39.078292 4923 scope.go:117] "RemoveContainer" containerID="558957f69bd79aec06a9e99d2aca5a6a5693ad37e041404f0ee7e8e8551bfa47" Nov 28 11:28:39 crc kubenswrapper[4923]: I1128 11:28:39.079068 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6bb684768f-9nr26" Nov 28 11:28:39 crc kubenswrapper[4923]: I1128 11:28:39.082716 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"0577bf3a-49d8-4540-92f3-fa1703570c2d","Type":"ContainerStarted","Data":"1064fe83c605861b84c06036c63c49d98500fec804f45b7a55d9c8633a752af9"} Nov 28 11:28:39 crc kubenswrapper[4923]: I1128 11:28:39.084846 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9bafb2b1-e8b0-458d-a6c9-e18b31b73c53","Type":"ContainerStarted","Data":"feca1db351b861bc494151e862361e7d81763093f60c6eb7a95b13b547f521f5"} Nov 28 11:28:39 crc kubenswrapper[4923]: I1128 11:28:39.093075 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6d97fcdd8f-vr5qb" podStartSLOduration=3.093055305 podStartE2EDuration="3.093055305s" podCreationTimestamp="2025-11-28 11:28:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:28:39.091475061 +0000 UTC m=+1198.220159281" watchObservedRunningTime="2025-11-28 11:28:39.093055305 +0000 UTC m=+1198.221739515" Nov 28 11:28:39 crc kubenswrapper[4923]: I1128 11:28:39.162819 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6bb684768f-9nr26"] Nov 28 11:28:39 crc kubenswrapper[4923]: I1128 11:28:39.186022 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b557d79-c22c-47a7-b460-d65c25c2bca8" path="/var/lib/kubelet/pods/5b557d79-c22c-47a7-b460-d65c25c2bca8/volumes" Nov 28 11:28:39 crc kubenswrapper[4923]: I1128 11:28:39.187009 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6bb684768f-9nr26"] Nov 28 11:28:39 crc kubenswrapper[4923]: I1128 11:28:39.409772 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-5fbc45745-sfgcl" Nov 28 11:28:39 crc kubenswrapper[4923]: I1128 11:28:39.480861 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-57876647b-g86pl"] Nov 28 11:28:39 crc kubenswrapper[4923]: I1128 11:28:39.489336 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-57876647b-g86pl" podUID="322dfb9b-c79f-4d58-96eb-265da89196f4" containerName="neutron-api" containerID="cri-o://4c349486c4d323a0c1525ced697c1addeeda2efe88a583d486bc07c71d95db8a" gracePeriod=30 Nov 28 11:28:39 crc kubenswrapper[4923]: I1128 11:28:39.489443 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-57876647b-g86pl" podUID="322dfb9b-c79f-4d58-96eb-265da89196f4" containerName="neutron-httpd" containerID="cri-o://18134142dd802445c28c83de513c0e68371687ade17d40fc5952f5934c47d926" gracePeriod=30 Nov 28 11:28:39 crc kubenswrapper[4923]: I1128 11:28:39.625483 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 28 11:28:40 crc kubenswrapper[4923]: I1128 11:28:40.109519 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-58696cc594-f6zjf" event={"ID":"7ffee751-8b25-4777-8623-ced7082ca426","Type":"ContainerStarted","Data":"e37860be879c31be86836c9bf4d4d183e0c032fc359631781179a6c1772a3c0c"} Nov 28 11:28:40 crc kubenswrapper[4923]: I1128 11:28:40.116221 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"9bafb2b1-e8b0-458d-a6c9-e18b31b73c53","Type":"ContainerStarted","Data":"88f0dac3bd9080ba61e7e1b7e5bf76c7b362aaedeb7246a4b2430b69948f1267"} Nov 28 11:28:40 crc kubenswrapper[4923]: I1128 11:28:40.143870 4923 generic.go:334] "Generic (PLEG): container finished" podID="322dfb9b-c79f-4d58-96eb-265da89196f4" containerID="18134142dd802445c28c83de513c0e68371687ade17d40fc5952f5934c47d926" exitCode=0 Nov 28 11:28:40 crc kubenswrapper[4923]: I1128 11:28:40.143921 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-57876647b-g86pl" event={"ID":"322dfb9b-c79f-4d58-96eb-265da89196f4","Type":"ContainerDied","Data":"18134142dd802445c28c83de513c0e68371687ade17d40fc5952f5934c47d926"} Nov 28 11:28:40 crc kubenswrapper[4923]: I1128 11:28:40.149290 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-fc7d79659-44chg" event={"ID":"a5b68211-f537-41d7-9b3d-859764f26575","Type":"ContainerStarted","Data":"5bb669a20d4a3bbe81398f469cbc6a5eacd6bbdbce51ee47d175bd5a54395cb2"} Nov 28 11:28:41 crc kubenswrapper[4923]: I1128 11:28:41.194844 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b515b733-f22b-448f-a4e9-ec3caf3cc293" path="/var/lib/kubelet/pods/b515b733-f22b-448f-a4e9-ec3caf3cc293/volumes" Nov 28 11:28:41 crc kubenswrapper[4923]: I1128 11:28:41.195883 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-58696cc594-f6zjf" event={"ID":"7ffee751-8b25-4777-8623-ced7082ca426","Type":"ContainerStarted","Data":"78930b26e573a3d162a3e0e1643a8e0aadb8d46ad6bf7cb9f8f3263739f11939"} Nov 28 11:28:41 crc kubenswrapper[4923]: I1128 11:28:41.204422 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"0577bf3a-49d8-4540-92f3-fa1703570c2d","Type":"ContainerStarted","Data":"8714dafe9b45f06f9f3de9bb809fcbb70eefd410c24a2352be43a3ae7967b5bb"} Nov 28 11:28:41 crc kubenswrapper[4923]: I1128 11:28:41.204552 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="0577bf3a-49d8-4540-92f3-fa1703570c2d" containerName="cinder-api-log" containerID="cri-o://1064fe83c605861b84c06036c63c49d98500fec804f45b7a55d9c8633a752af9" gracePeriod=30 Nov 28 11:28:41 crc kubenswrapper[4923]: I1128 11:28:41.204630 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Nov 28 11:28:41 crc kubenswrapper[4923]: I1128 11:28:41.204658 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="0577bf3a-49d8-4540-92f3-fa1703570c2d" containerName="cinder-api" containerID="cri-o://8714dafe9b45f06f9f3de9bb809fcbb70eefd410c24a2352be43a3ae7967b5bb" gracePeriod=30 Nov 28 11:28:41 crc kubenswrapper[4923]: I1128 11:28:41.225183 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9bafb2b1-e8b0-458d-a6c9-e18b31b73c53","Type":"ContainerStarted","Data":"1a8b5e6bab63dbf6cc77462ad4260a1dd4d3c3fb3142df51aac01e60157e1d7e"} Nov 28 11:28:41 crc kubenswrapper[4923]: I1128 11:28:41.253471 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"d769371b-b401-4423-beb7-fb8b0ac9e6e0","Type":"ContainerStarted","Data":"cd387ba196b323fcb98b03ad22c4c6809986f3df428171833b3e67c45acf6d28"} Nov 28 11:28:41 crc kubenswrapper[4923]: I1128 11:28:41.268240 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-fc7d79659-44chg" 
event={"ID":"a5b68211-f537-41d7-9b3d-859764f26575","Type":"ContainerStarted","Data":"e3321d041fd479931cc31d0203426603defa48f63884a292a64365370c69c7a3"} Nov 28 11:28:41 crc kubenswrapper[4923]: I1128 11:28:41.341501 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=5.3414892290000004 podStartE2EDuration="5.341489229s" podCreationTimestamp="2025-11-28 11:28:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:28:41.340655806 +0000 UTC m=+1200.469340016" watchObservedRunningTime="2025-11-28 11:28:41.341489229 +0000 UTC m=+1200.470173439" Nov 28 11:28:41 crc kubenswrapper[4923]: I1128 11:28:41.342266 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-fc7d79659-44chg" podStartSLOduration=3.422768001 podStartE2EDuration="6.342260661s" podCreationTimestamp="2025-11-28 11:28:35 +0000 UTC" firstStartedPulling="2025-11-28 11:28:36.562962056 +0000 UTC m=+1195.691646266" lastFinishedPulling="2025-11-28 11:28:39.482454716 +0000 UTC m=+1198.611138926" observedRunningTime="2025-11-28 11:28:41.322691833 +0000 UTC m=+1200.451376063" watchObservedRunningTime="2025-11-28 11:28:41.342260661 +0000 UTC m=+1200.470944871" Nov 28 11:28:41 crc kubenswrapper[4923]: I1128 11:28:41.364124 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-58696cc594-f6zjf" podStartSLOduration=3.031295881 podStartE2EDuration="6.364106693s" podCreationTimestamp="2025-11-28 11:28:35 +0000 UTC" firstStartedPulling="2025-11-28 11:28:36.150449117 +0000 UTC m=+1195.279133327" lastFinishedPulling="2025-11-28 11:28:39.483259939 +0000 UTC m=+1198.611944139" observedRunningTime="2025-11-28 11:28:41.361796498 +0000 UTC m=+1200.490480708" watchObservedRunningTime="2025-11-28 11:28:41.364106693 +0000 UTC m=+1200.492790903" Nov 28 11:28:42 crc kubenswrapper[4923]: I1128 11:28:42.162308 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 28 11:28:42 crc kubenswrapper[4923]: I1128 11:28:42.260417 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0577bf3a-49d8-4540-92f3-fa1703570c2d-etc-machine-id\") pod \"0577bf3a-49d8-4540-92f3-fa1703570c2d\" (UID: \"0577bf3a-49d8-4540-92f3-fa1703570c2d\") " Nov 28 11:28:42 crc kubenswrapper[4923]: I1128 11:28:42.260532 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0577bf3a-49d8-4540-92f3-fa1703570c2d-logs\") pod \"0577bf3a-49d8-4540-92f3-fa1703570c2d\" (UID: \"0577bf3a-49d8-4540-92f3-fa1703570c2d\") " Nov 28 11:28:42 crc kubenswrapper[4923]: I1128 11:28:42.260554 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x6kx5\" (UniqueName: \"kubernetes.io/projected/0577bf3a-49d8-4540-92f3-fa1703570c2d-kube-api-access-x6kx5\") pod \"0577bf3a-49d8-4540-92f3-fa1703570c2d\" (UID: \"0577bf3a-49d8-4540-92f3-fa1703570c2d\") " Nov 28 11:28:42 crc kubenswrapper[4923]: I1128 11:28:42.260640 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0577bf3a-49d8-4540-92f3-fa1703570c2d-scripts\") pod \"0577bf3a-49d8-4540-92f3-fa1703570c2d\" (UID: \"0577bf3a-49d8-4540-92f3-fa1703570c2d\") " Nov 28 11:28:42 crc kubenswrapper[4923]: I1128 11:28:42.260660 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0577bf3a-49d8-4540-92f3-fa1703570c2d-config-data-custom\") pod \"0577bf3a-49d8-4540-92f3-fa1703570c2d\" (UID: \"0577bf3a-49d8-4540-92f3-fa1703570c2d\") " Nov 28 11:28:42 crc kubenswrapper[4923]: I1128 11:28:42.260712 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0577bf3a-49d8-4540-92f3-fa1703570c2d-config-data\") pod \"0577bf3a-49d8-4540-92f3-fa1703570c2d\" (UID: \"0577bf3a-49d8-4540-92f3-fa1703570c2d\") " Nov 28 11:28:42 crc kubenswrapper[4923]: I1128 11:28:42.260732 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0577bf3a-49d8-4540-92f3-fa1703570c2d-combined-ca-bundle\") pod \"0577bf3a-49d8-4540-92f3-fa1703570c2d\" (UID: \"0577bf3a-49d8-4540-92f3-fa1703570c2d\") " Nov 28 11:28:42 crc kubenswrapper[4923]: I1128 11:28:42.265035 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0577bf3a-49d8-4540-92f3-fa1703570c2d-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "0577bf3a-49d8-4540-92f3-fa1703570c2d" (UID: "0577bf3a-49d8-4540-92f3-fa1703570c2d"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 11:28:42 crc kubenswrapper[4923]: I1128 11:28:42.265358 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0577bf3a-49d8-4540-92f3-fa1703570c2d-logs" (OuterVolumeSpecName: "logs") pod "0577bf3a-49d8-4540-92f3-fa1703570c2d" (UID: "0577bf3a-49d8-4540-92f3-fa1703570c2d"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:28:42 crc kubenswrapper[4923]: I1128 11:28:42.269383 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0577bf3a-49d8-4540-92f3-fa1703570c2d-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "0577bf3a-49d8-4540-92f3-fa1703570c2d" (UID: "0577bf3a-49d8-4540-92f3-fa1703570c2d"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:28:42 crc kubenswrapper[4923]: I1128 11:28:42.271378 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0577bf3a-49d8-4540-92f3-fa1703570c2d-kube-api-access-x6kx5" (OuterVolumeSpecName: "kube-api-access-x6kx5") pod "0577bf3a-49d8-4540-92f3-fa1703570c2d" (UID: "0577bf3a-49d8-4540-92f3-fa1703570c2d"). InnerVolumeSpecName "kube-api-access-x6kx5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:28:42 crc kubenswrapper[4923]: I1128 11:28:42.287779 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0577bf3a-49d8-4540-92f3-fa1703570c2d-scripts" (OuterVolumeSpecName: "scripts") pod "0577bf3a-49d8-4540-92f3-fa1703570c2d" (UID: "0577bf3a-49d8-4540-92f3-fa1703570c2d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:28:42 crc kubenswrapper[4923]: I1128 11:28:42.316426 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0577bf3a-49d8-4540-92f3-fa1703570c2d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0577bf3a-49d8-4540-92f3-fa1703570c2d" (UID: "0577bf3a-49d8-4540-92f3-fa1703570c2d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:28:42 crc kubenswrapper[4923]: I1128 11:28:42.325995 4923 generic.go:334] "Generic (PLEG): container finished" podID="0577bf3a-49d8-4540-92f3-fa1703570c2d" containerID="8714dafe9b45f06f9f3de9bb809fcbb70eefd410c24a2352be43a3ae7967b5bb" exitCode=0 Nov 28 11:28:42 crc kubenswrapper[4923]: I1128 11:28:42.326027 4923 generic.go:334] "Generic (PLEG): container finished" podID="0577bf3a-49d8-4540-92f3-fa1703570c2d" containerID="1064fe83c605861b84c06036c63c49d98500fec804f45b7a55d9c8633a752af9" exitCode=143 Nov 28 11:28:42 crc kubenswrapper[4923]: I1128 11:28:42.326071 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"0577bf3a-49d8-4540-92f3-fa1703570c2d","Type":"ContainerDied","Data":"8714dafe9b45f06f9f3de9bb809fcbb70eefd410c24a2352be43a3ae7967b5bb"} Nov 28 11:28:42 crc kubenswrapper[4923]: I1128 11:28:42.326101 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"0577bf3a-49d8-4540-92f3-fa1703570c2d","Type":"ContainerDied","Data":"1064fe83c605861b84c06036c63c49d98500fec804f45b7a55d9c8633a752af9"} Nov 28 11:28:42 crc kubenswrapper[4923]: I1128 11:28:42.326114 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"0577bf3a-49d8-4540-92f3-fa1703570c2d","Type":"ContainerDied","Data":"c26f8607f1a279ccb2ade3e7bd89c6d1aeeed0581d80b936cc24ba4736a2aa81"} Nov 28 11:28:42 crc kubenswrapper[4923]: I1128 11:28:42.326149 4923 scope.go:117] "RemoveContainer" containerID="8714dafe9b45f06f9f3de9bb809fcbb70eefd410c24a2352be43a3ae7967b5bb" Nov 28 11:28:42 crc kubenswrapper[4923]: I1128 11:28:42.326282 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 28 11:28:42 crc kubenswrapper[4923]: I1128 11:28:42.330266 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9bafb2b1-e8b0-458d-a6c9-e18b31b73c53","Type":"ContainerStarted","Data":"2c204ba557e5abdb2905d400903dded7f672076feeb64a482b08343ea02647e9"} Nov 28 11:28:42 crc kubenswrapper[4923]: I1128 11:28:42.334019 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"d769371b-b401-4423-beb7-fb8b0ac9e6e0","Type":"ContainerStarted","Data":"3881df947bd936a2500c8eb33682405b0fb6b873e56b52521f76b8179985fba2"} Nov 28 11:28:42 crc kubenswrapper[4923]: I1128 11:28:42.344689 4923 scope.go:117] "RemoveContainer" containerID="1064fe83c605861b84c06036c63c49d98500fec804f45b7a55d9c8633a752af9" Nov 28 11:28:42 crc kubenswrapper[4923]: I1128 11:28:42.355558 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=4.246568647 podStartE2EDuration="6.355541568s" podCreationTimestamp="2025-11-28 11:28:36 +0000 UTC" firstStartedPulling="2025-11-28 11:28:37.373567788 +0000 UTC m=+1196.502251998" lastFinishedPulling="2025-11-28 11:28:39.482540719 +0000 UTC m=+1198.611224919" observedRunningTime="2025-11-28 11:28:42.354847259 +0000 UTC m=+1201.483531469" watchObservedRunningTime="2025-11-28 11:28:42.355541568 +0000 UTC m=+1201.484225778" Nov 28 11:28:42 crc kubenswrapper[4923]: I1128 11:28:42.359911 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0577bf3a-49d8-4540-92f3-fa1703570c2d-config-data" (OuterVolumeSpecName: "config-data") pod "0577bf3a-49d8-4540-92f3-fa1703570c2d" (UID: "0577bf3a-49d8-4540-92f3-fa1703570c2d"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:28:42 crc kubenswrapper[4923]: I1128 11:28:42.363023 4923 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0577bf3a-49d8-4540-92f3-fa1703570c2d-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 11:28:42 crc kubenswrapper[4923]: I1128 11:28:42.363043 4923 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0577bf3a-49d8-4540-92f3-fa1703570c2d-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 28 11:28:42 crc kubenswrapper[4923]: I1128 11:28:42.363055 4923 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0577bf3a-49d8-4540-92f3-fa1703570c2d-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 11:28:42 crc kubenswrapper[4923]: I1128 11:28:42.363065 4923 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0577bf3a-49d8-4540-92f3-fa1703570c2d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 11:28:42 crc kubenswrapper[4923]: I1128 11:28:42.363073 4923 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0577bf3a-49d8-4540-92f3-fa1703570c2d-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 28 11:28:42 crc kubenswrapper[4923]: I1128 11:28:42.363083 4923 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0577bf3a-49d8-4540-92f3-fa1703570c2d-logs\") on node \"crc\" DevicePath \"\"" Nov 28 11:28:42 crc kubenswrapper[4923]: I1128 11:28:42.363093 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x6kx5\" (UniqueName: \"kubernetes.io/projected/0577bf3a-49d8-4540-92f3-fa1703570c2d-kube-api-access-x6kx5\") on node \"crc\" DevicePath \"\"" Nov 28 11:28:42 crc kubenswrapper[4923]: I1128 11:28:42.433496 4923 scope.go:117] "RemoveContainer" containerID="8714dafe9b45f06f9f3de9bb809fcbb70eefd410c24a2352be43a3ae7967b5bb" Nov 28 11:28:42 crc kubenswrapper[4923]: E1128 11:28:42.435947 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8714dafe9b45f06f9f3de9bb809fcbb70eefd410c24a2352be43a3ae7967b5bb\": container with ID starting with 8714dafe9b45f06f9f3de9bb809fcbb70eefd410c24a2352be43a3ae7967b5bb not found: ID does not exist" containerID="8714dafe9b45f06f9f3de9bb809fcbb70eefd410c24a2352be43a3ae7967b5bb" Nov 28 11:28:42 crc kubenswrapper[4923]: I1128 11:28:42.436002 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8714dafe9b45f06f9f3de9bb809fcbb70eefd410c24a2352be43a3ae7967b5bb"} err="failed to get container status \"8714dafe9b45f06f9f3de9bb809fcbb70eefd410c24a2352be43a3ae7967b5bb\": rpc error: code = NotFound desc = could not find container \"8714dafe9b45f06f9f3de9bb809fcbb70eefd410c24a2352be43a3ae7967b5bb\": container with ID starting with 8714dafe9b45f06f9f3de9bb809fcbb70eefd410c24a2352be43a3ae7967b5bb not found: ID does not exist" Nov 28 11:28:42 crc kubenswrapper[4923]: I1128 11:28:42.436033 4923 scope.go:117] "RemoveContainer" containerID="1064fe83c605861b84c06036c63c49d98500fec804f45b7a55d9c8633a752af9" Nov 28 11:28:42 crc kubenswrapper[4923]: E1128 11:28:42.436493 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"1064fe83c605861b84c06036c63c49d98500fec804f45b7a55d9c8633a752af9\": container with ID starting with 1064fe83c605861b84c06036c63c49d98500fec804f45b7a55d9c8633a752af9 not found: ID does not exist" containerID="1064fe83c605861b84c06036c63c49d98500fec804f45b7a55d9c8633a752af9" Nov 28 11:28:42 crc kubenswrapper[4923]: I1128 11:28:42.436522 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1064fe83c605861b84c06036c63c49d98500fec804f45b7a55d9c8633a752af9"} err="failed to get container status \"1064fe83c605861b84c06036c63c49d98500fec804f45b7a55d9c8633a752af9\": rpc error: code = NotFound desc = could not find container \"1064fe83c605861b84c06036c63c49d98500fec804f45b7a55d9c8633a752af9\": container with ID starting with 1064fe83c605861b84c06036c63c49d98500fec804f45b7a55d9c8633a752af9 not found: ID does not exist" Nov 28 11:28:42 crc kubenswrapper[4923]: I1128 11:28:42.436543 4923 scope.go:117] "RemoveContainer" containerID="8714dafe9b45f06f9f3de9bb809fcbb70eefd410c24a2352be43a3ae7967b5bb" Nov 28 11:28:42 crc kubenswrapper[4923]: I1128 11:28:42.436922 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8714dafe9b45f06f9f3de9bb809fcbb70eefd410c24a2352be43a3ae7967b5bb"} err="failed to get container status \"8714dafe9b45f06f9f3de9bb809fcbb70eefd410c24a2352be43a3ae7967b5bb\": rpc error: code = NotFound desc = could not find container \"8714dafe9b45f06f9f3de9bb809fcbb70eefd410c24a2352be43a3ae7967b5bb\": container with ID starting with 8714dafe9b45f06f9f3de9bb809fcbb70eefd410c24a2352be43a3ae7967b5bb not found: ID does not exist" Nov 28 11:28:42 crc kubenswrapper[4923]: I1128 11:28:42.436979 4923 scope.go:117] "RemoveContainer" containerID="1064fe83c605861b84c06036c63c49d98500fec804f45b7a55d9c8633a752af9" Nov 28 11:28:42 crc kubenswrapper[4923]: I1128 11:28:42.439253 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1064fe83c605861b84c06036c63c49d98500fec804f45b7a55d9c8633a752af9"} err="failed to get container status \"1064fe83c605861b84c06036c63c49d98500fec804f45b7a55d9c8633a752af9\": rpc error: code = NotFound desc = could not find container \"1064fe83c605861b84c06036c63c49d98500fec804f45b7a55d9c8633a752af9\": container with ID starting with 1064fe83c605861b84c06036c63c49d98500fec804f45b7a55d9c8633a752af9 not found: ID does not exist" Nov 28 11:28:42 crc kubenswrapper[4923]: I1128 11:28:42.655676 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 28 11:28:42 crc kubenswrapper[4923]: I1128 11:28:42.661704 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Nov 28 11:28:42 crc kubenswrapper[4923]: I1128 11:28:42.686039 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Nov 28 11:28:42 crc kubenswrapper[4923]: E1128 11:28:42.686494 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0577bf3a-49d8-4540-92f3-fa1703570c2d" containerName="cinder-api" Nov 28 11:28:42 crc kubenswrapper[4923]: I1128 11:28:42.686585 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="0577bf3a-49d8-4540-92f3-fa1703570c2d" containerName="cinder-api" Nov 28 11:28:42 crc kubenswrapper[4923]: E1128 11:28:42.686643 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0577bf3a-49d8-4540-92f3-fa1703570c2d" containerName="cinder-api-log" Nov 28 11:28:42 crc kubenswrapper[4923]: I1128 11:28:42.686688 4923 state_mem.go:107] "Deleted CPUSet 
assignment" podUID="0577bf3a-49d8-4540-92f3-fa1703570c2d" containerName="cinder-api-log" Nov 28 11:28:42 crc kubenswrapper[4923]: E1128 11:28:42.686758 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b515b733-f22b-448f-a4e9-ec3caf3cc293" containerName="init" Nov 28 11:28:42 crc kubenswrapper[4923]: I1128 11:28:42.686803 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="b515b733-f22b-448f-a4e9-ec3caf3cc293" containerName="init" Nov 28 11:28:42 crc kubenswrapper[4923]: I1128 11:28:42.687018 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="0577bf3a-49d8-4540-92f3-fa1703570c2d" containerName="cinder-api" Nov 28 11:28:42 crc kubenswrapper[4923]: I1128 11:28:42.687083 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="0577bf3a-49d8-4540-92f3-fa1703570c2d" containerName="cinder-api-log" Nov 28 11:28:42 crc kubenswrapper[4923]: I1128 11:28:42.687149 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="b515b733-f22b-448f-a4e9-ec3caf3cc293" containerName="init" Nov 28 11:28:42 crc kubenswrapper[4923]: I1128 11:28:42.688055 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 28 11:28:42 crc kubenswrapper[4923]: I1128 11:28:42.689962 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Nov 28 11:28:42 crc kubenswrapper[4923]: I1128 11:28:42.690098 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc" Nov 28 11:28:42 crc kubenswrapper[4923]: I1128 11:28:42.690424 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc" Nov 28 11:28:42 crc kubenswrapper[4923]: I1128 11:28:42.704325 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 28 11:28:42 crc kubenswrapper[4923]: I1128 11:28:42.871104 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9add781d-9890-4483-b021-0619d4286428-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"9add781d-9890-4483-b021-0619d4286428\") " pod="openstack/cinder-api-0" Nov 28 11:28:42 crc kubenswrapper[4923]: I1128 11:28:42.871369 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9add781d-9890-4483-b021-0619d4286428-config-data\") pod \"cinder-api-0\" (UID: \"9add781d-9890-4483-b021-0619d4286428\") " pod="openstack/cinder-api-0" Nov 28 11:28:42 crc kubenswrapper[4923]: I1128 11:28:42.871394 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/9add781d-9890-4483-b021-0619d4286428-public-tls-certs\") pod \"cinder-api-0\" (UID: \"9add781d-9890-4483-b021-0619d4286428\") " pod="openstack/cinder-api-0" Nov 28 11:28:42 crc kubenswrapper[4923]: I1128 11:28:42.871421 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fwbjv\" (UniqueName: \"kubernetes.io/projected/9add781d-9890-4483-b021-0619d4286428-kube-api-access-fwbjv\") pod \"cinder-api-0\" (UID: \"9add781d-9890-4483-b021-0619d4286428\") " pod="openstack/cinder-api-0" Nov 28 11:28:42 crc kubenswrapper[4923]: I1128 11:28:42.871436 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/9add781d-9890-4483-b021-0619d4286428-etc-machine-id\") pod \"cinder-api-0\" (UID: \"9add781d-9890-4483-b021-0619d4286428\") " pod="openstack/cinder-api-0" Nov 28 11:28:42 crc kubenswrapper[4923]: I1128 11:28:42.871452 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9add781d-9890-4483-b021-0619d4286428-scripts\") pod \"cinder-api-0\" (UID: \"9add781d-9890-4483-b021-0619d4286428\") " pod="openstack/cinder-api-0" Nov 28 11:28:42 crc kubenswrapper[4923]: I1128 11:28:42.871500 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9add781d-9890-4483-b021-0619d4286428-logs\") pod \"cinder-api-0\" (UID: \"9add781d-9890-4483-b021-0619d4286428\") " pod="openstack/cinder-api-0" Nov 28 11:28:42 crc kubenswrapper[4923]: I1128 11:28:42.871522 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/9add781d-9890-4483-b021-0619d4286428-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"9add781d-9890-4483-b021-0619d4286428\") " pod="openstack/cinder-api-0" Nov 28 11:28:42 crc kubenswrapper[4923]: I1128 11:28:42.871541 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9add781d-9890-4483-b021-0619d4286428-config-data-custom\") pod \"cinder-api-0\" (UID: \"9add781d-9890-4483-b021-0619d4286428\") " pod="openstack/cinder-api-0" Nov 28 11:28:42 crc kubenswrapper[4923]: I1128 11:28:42.973246 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9add781d-9890-4483-b021-0619d4286428-config-data-custom\") pod \"cinder-api-0\" (UID: \"9add781d-9890-4483-b021-0619d4286428\") " pod="openstack/cinder-api-0" Nov 28 11:28:42 crc kubenswrapper[4923]: I1128 11:28:42.973301 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9add781d-9890-4483-b021-0619d4286428-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"9add781d-9890-4483-b021-0619d4286428\") " pod="openstack/cinder-api-0" Nov 28 11:28:42 crc kubenswrapper[4923]: I1128 11:28:42.973353 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9add781d-9890-4483-b021-0619d4286428-config-data\") pod \"cinder-api-0\" (UID: \"9add781d-9890-4483-b021-0619d4286428\") " pod="openstack/cinder-api-0" Nov 28 11:28:42 crc kubenswrapper[4923]: I1128 11:28:42.973374 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/9add781d-9890-4483-b021-0619d4286428-public-tls-certs\") pod \"cinder-api-0\" (UID: \"9add781d-9890-4483-b021-0619d4286428\") " pod="openstack/cinder-api-0" Nov 28 11:28:42 crc kubenswrapper[4923]: I1128 11:28:42.973404 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fwbjv\" (UniqueName: \"kubernetes.io/projected/9add781d-9890-4483-b021-0619d4286428-kube-api-access-fwbjv\") pod \"cinder-api-0\" (UID: \"9add781d-9890-4483-b021-0619d4286428\") " pod="openstack/cinder-api-0" Nov 28 11:28:42 crc kubenswrapper[4923]: I1128 11:28:42.973419 
4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/9add781d-9890-4483-b021-0619d4286428-etc-machine-id\") pod \"cinder-api-0\" (UID: \"9add781d-9890-4483-b021-0619d4286428\") " pod="openstack/cinder-api-0" Nov 28 11:28:42 crc kubenswrapper[4923]: I1128 11:28:42.973434 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9add781d-9890-4483-b021-0619d4286428-scripts\") pod \"cinder-api-0\" (UID: \"9add781d-9890-4483-b021-0619d4286428\") " pod="openstack/cinder-api-0" Nov 28 11:28:42 crc kubenswrapper[4923]: I1128 11:28:42.973482 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9add781d-9890-4483-b021-0619d4286428-logs\") pod \"cinder-api-0\" (UID: \"9add781d-9890-4483-b021-0619d4286428\") " pod="openstack/cinder-api-0" Nov 28 11:28:42 crc kubenswrapper[4923]: I1128 11:28:42.973500 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/9add781d-9890-4483-b021-0619d4286428-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"9add781d-9890-4483-b021-0619d4286428\") " pod="openstack/cinder-api-0" Nov 28 11:28:42 crc kubenswrapper[4923]: I1128 11:28:42.974867 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9add781d-9890-4483-b021-0619d4286428-logs\") pod \"cinder-api-0\" (UID: \"9add781d-9890-4483-b021-0619d4286428\") " pod="openstack/cinder-api-0" Nov 28 11:28:42 crc kubenswrapper[4923]: I1128 11:28:42.975027 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/9add781d-9890-4483-b021-0619d4286428-etc-machine-id\") pod \"cinder-api-0\" (UID: \"9add781d-9890-4483-b021-0619d4286428\") " pod="openstack/cinder-api-0" Nov 28 11:28:42 crc kubenswrapper[4923]: I1128 11:28:42.984206 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/9add781d-9890-4483-b021-0619d4286428-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"9add781d-9890-4483-b021-0619d4286428\") " pod="openstack/cinder-api-0" Nov 28 11:28:42 crc kubenswrapper[4923]: I1128 11:28:42.992582 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9add781d-9890-4483-b021-0619d4286428-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"9add781d-9890-4483-b021-0619d4286428\") " pod="openstack/cinder-api-0" Nov 28 11:28:42 crc kubenswrapper[4923]: I1128 11:28:42.992980 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9add781d-9890-4483-b021-0619d4286428-config-data\") pod \"cinder-api-0\" (UID: \"9add781d-9890-4483-b021-0619d4286428\") " pod="openstack/cinder-api-0" Nov 28 11:28:42 crc kubenswrapper[4923]: I1128 11:28:42.993496 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9add781d-9890-4483-b021-0619d4286428-config-data-custom\") pod \"cinder-api-0\" (UID: \"9add781d-9890-4483-b021-0619d4286428\") " pod="openstack/cinder-api-0" Nov 28 11:28:42 crc kubenswrapper[4923]: I1128 11:28:42.995394 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/9add781d-9890-4483-b021-0619d4286428-public-tls-certs\") pod \"cinder-api-0\" (UID: \"9add781d-9890-4483-b021-0619d4286428\") " pod="openstack/cinder-api-0" Nov 28 11:28:42 crc kubenswrapper[4923]: I1128 11:28:42.997428 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9add781d-9890-4483-b021-0619d4286428-scripts\") pod \"cinder-api-0\" (UID: \"9add781d-9890-4483-b021-0619d4286428\") " pod="openstack/cinder-api-0" Nov 28 11:28:42 crc kubenswrapper[4923]: I1128 11:28:42.998242 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fwbjv\" (UniqueName: \"kubernetes.io/projected/9add781d-9890-4483-b021-0619d4286428-kube-api-access-fwbjv\") pod \"cinder-api-0\" (UID: \"9add781d-9890-4483-b021-0619d4286428\") " pod="openstack/cinder-api-0" Nov 28 11:28:43 crc kubenswrapper[4923]: I1128 11:28:43.001894 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 28 11:28:43 crc kubenswrapper[4923]: I1128 11:28:43.114111 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-79797f8fb4-8wx8x"] Nov 28 11:28:43 crc kubenswrapper[4923]: I1128 11:28:43.115446 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-79797f8fb4-8wx8x" Nov 28 11:28:43 crc kubenswrapper[4923]: I1128 11:28:43.119957 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc" Nov 28 11:28:43 crc kubenswrapper[4923]: I1128 11:28:43.129309 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc" Nov 28 11:28:43 crc kubenswrapper[4923]: I1128 11:28:43.143364 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-79797f8fb4-8wx8x"] Nov 28 11:28:43 crc kubenswrapper[4923]: I1128 11:28:43.228465 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0577bf3a-49d8-4540-92f3-fa1703570c2d" path="/var/lib/kubelet/pods/0577bf3a-49d8-4540-92f3-fa1703570c2d/volumes" Nov 28 11:28:43 crc kubenswrapper[4923]: I1128 11:28:43.281218 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/65fad5c3-977a-4ca4-924a-41cac6143073-combined-ca-bundle\") pod \"barbican-api-79797f8fb4-8wx8x\" (UID: \"65fad5c3-977a-4ca4-924a-41cac6143073\") " pod="openstack/barbican-api-79797f8fb4-8wx8x" Nov 28 11:28:43 crc kubenswrapper[4923]: I1128 11:28:43.281596 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/65fad5c3-977a-4ca4-924a-41cac6143073-config-data\") pod \"barbican-api-79797f8fb4-8wx8x\" (UID: \"65fad5c3-977a-4ca4-924a-41cac6143073\") " pod="openstack/barbican-api-79797f8fb4-8wx8x" Nov 28 11:28:43 crc kubenswrapper[4923]: I1128 11:28:43.281618 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/65fad5c3-977a-4ca4-924a-41cac6143073-config-data-custom\") pod \"barbican-api-79797f8fb4-8wx8x\" (UID: \"65fad5c3-977a-4ca4-924a-41cac6143073\") " pod="openstack/barbican-api-79797f8fb4-8wx8x" Nov 28 11:28:43 crc kubenswrapper[4923]: I1128 11:28:43.281645 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"kube-api-access-sxb5b\" (UniqueName: \"kubernetes.io/projected/65fad5c3-977a-4ca4-924a-41cac6143073-kube-api-access-sxb5b\") pod \"barbican-api-79797f8fb4-8wx8x\" (UID: \"65fad5c3-977a-4ca4-924a-41cac6143073\") " pod="openstack/barbican-api-79797f8fb4-8wx8x" Nov 28 11:28:43 crc kubenswrapper[4923]: I1128 11:28:43.281703 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/65fad5c3-977a-4ca4-924a-41cac6143073-public-tls-certs\") pod \"barbican-api-79797f8fb4-8wx8x\" (UID: \"65fad5c3-977a-4ca4-924a-41cac6143073\") " pod="openstack/barbican-api-79797f8fb4-8wx8x" Nov 28 11:28:43 crc kubenswrapper[4923]: I1128 11:28:43.281758 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/65fad5c3-977a-4ca4-924a-41cac6143073-logs\") pod \"barbican-api-79797f8fb4-8wx8x\" (UID: \"65fad5c3-977a-4ca4-924a-41cac6143073\") " pod="openstack/barbican-api-79797f8fb4-8wx8x" Nov 28 11:28:43 crc kubenswrapper[4923]: I1128 11:28:43.281830 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/65fad5c3-977a-4ca4-924a-41cac6143073-internal-tls-certs\") pod \"barbican-api-79797f8fb4-8wx8x\" (UID: \"65fad5c3-977a-4ca4-924a-41cac6143073\") " pod="openstack/barbican-api-79797f8fb4-8wx8x" Nov 28 11:28:43 crc kubenswrapper[4923]: I1128 11:28:43.385820 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/65fad5c3-977a-4ca4-924a-41cac6143073-config-data\") pod \"barbican-api-79797f8fb4-8wx8x\" (UID: \"65fad5c3-977a-4ca4-924a-41cac6143073\") " pod="openstack/barbican-api-79797f8fb4-8wx8x" Nov 28 11:28:43 crc kubenswrapper[4923]: I1128 11:28:43.385868 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/65fad5c3-977a-4ca4-924a-41cac6143073-config-data-custom\") pod \"barbican-api-79797f8fb4-8wx8x\" (UID: \"65fad5c3-977a-4ca4-924a-41cac6143073\") " pod="openstack/barbican-api-79797f8fb4-8wx8x" Nov 28 11:28:43 crc kubenswrapper[4923]: I1128 11:28:43.385887 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sxb5b\" (UniqueName: \"kubernetes.io/projected/65fad5c3-977a-4ca4-924a-41cac6143073-kube-api-access-sxb5b\") pod \"barbican-api-79797f8fb4-8wx8x\" (UID: \"65fad5c3-977a-4ca4-924a-41cac6143073\") " pod="openstack/barbican-api-79797f8fb4-8wx8x" Nov 28 11:28:43 crc kubenswrapper[4923]: I1128 11:28:43.385915 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/65fad5c3-977a-4ca4-924a-41cac6143073-public-tls-certs\") pod \"barbican-api-79797f8fb4-8wx8x\" (UID: \"65fad5c3-977a-4ca4-924a-41cac6143073\") " pod="openstack/barbican-api-79797f8fb4-8wx8x" Nov 28 11:28:43 crc kubenswrapper[4923]: I1128 11:28:43.385961 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/65fad5c3-977a-4ca4-924a-41cac6143073-logs\") pod \"barbican-api-79797f8fb4-8wx8x\" (UID: \"65fad5c3-977a-4ca4-924a-41cac6143073\") " pod="openstack/barbican-api-79797f8fb4-8wx8x" Nov 28 11:28:43 crc kubenswrapper[4923]: I1128 11:28:43.386009 4923 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/65fad5c3-977a-4ca4-924a-41cac6143073-internal-tls-certs\") pod \"barbican-api-79797f8fb4-8wx8x\" (UID: \"65fad5c3-977a-4ca4-924a-41cac6143073\") " pod="openstack/barbican-api-79797f8fb4-8wx8x" Nov 28 11:28:43 crc kubenswrapper[4923]: I1128 11:28:43.386039 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/65fad5c3-977a-4ca4-924a-41cac6143073-combined-ca-bundle\") pod \"barbican-api-79797f8fb4-8wx8x\" (UID: \"65fad5c3-977a-4ca4-924a-41cac6143073\") " pod="openstack/barbican-api-79797f8fb4-8wx8x" Nov 28 11:28:43 crc kubenswrapper[4923]: I1128 11:28:43.391842 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/65fad5c3-977a-4ca4-924a-41cac6143073-logs\") pod \"barbican-api-79797f8fb4-8wx8x\" (UID: \"65fad5c3-977a-4ca4-924a-41cac6143073\") " pod="openstack/barbican-api-79797f8fb4-8wx8x" Nov 28 11:28:43 crc kubenswrapper[4923]: I1128 11:28:43.397597 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/65fad5c3-977a-4ca4-924a-41cac6143073-combined-ca-bundle\") pod \"barbican-api-79797f8fb4-8wx8x\" (UID: \"65fad5c3-977a-4ca4-924a-41cac6143073\") " pod="openstack/barbican-api-79797f8fb4-8wx8x" Nov 28 11:28:43 crc kubenswrapper[4923]: I1128 11:28:43.411102 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/65fad5c3-977a-4ca4-924a-41cac6143073-internal-tls-certs\") pod \"barbican-api-79797f8fb4-8wx8x\" (UID: \"65fad5c3-977a-4ca4-924a-41cac6143073\") " pod="openstack/barbican-api-79797f8fb4-8wx8x" Nov 28 11:28:43 crc kubenswrapper[4923]: I1128 11:28:43.414272 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/65fad5c3-977a-4ca4-924a-41cac6143073-config-data-custom\") pod \"barbican-api-79797f8fb4-8wx8x\" (UID: \"65fad5c3-977a-4ca4-924a-41cac6143073\") " pod="openstack/barbican-api-79797f8fb4-8wx8x" Nov 28 11:28:43 crc kubenswrapper[4923]: I1128 11:28:43.414314 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/65fad5c3-977a-4ca4-924a-41cac6143073-public-tls-certs\") pod \"barbican-api-79797f8fb4-8wx8x\" (UID: \"65fad5c3-977a-4ca4-924a-41cac6143073\") " pod="openstack/barbican-api-79797f8fb4-8wx8x" Nov 28 11:28:43 crc kubenswrapper[4923]: I1128 11:28:43.415044 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/65fad5c3-977a-4ca4-924a-41cac6143073-config-data\") pod \"barbican-api-79797f8fb4-8wx8x\" (UID: \"65fad5c3-977a-4ca4-924a-41cac6143073\") " pod="openstack/barbican-api-79797f8fb4-8wx8x" Nov 28 11:28:43 crc kubenswrapper[4923]: I1128 11:28:43.418416 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sxb5b\" (UniqueName: \"kubernetes.io/projected/65fad5c3-977a-4ca4-924a-41cac6143073-kube-api-access-sxb5b\") pod \"barbican-api-79797f8fb4-8wx8x\" (UID: \"65fad5c3-977a-4ca4-924a-41cac6143073\") " pod="openstack/barbican-api-79797f8fb4-8wx8x" Nov 28 11:28:43 crc kubenswrapper[4923]: I1128 11:28:43.494358 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-79797f8fb4-8wx8x" Nov 28 11:28:43 crc kubenswrapper[4923]: I1128 11:28:43.761791 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 28 11:28:43 crc kubenswrapper[4923]: I1128 11:28:43.984318 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-79797f8fb4-8wx8x"] Nov 28 11:28:44 crc kubenswrapper[4923]: I1128 11:28:44.377341 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9bafb2b1-e8b0-458d-a6c9-e18b31b73c53","Type":"ContainerStarted","Data":"77adc24acbcb0229afdd02b080757f3a4dea527e5b845143c940753923dc966f"} Nov 28 11:28:44 crc kubenswrapper[4923]: I1128 11:28:44.378706 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 28 11:28:44 crc kubenswrapper[4923]: I1128 11:28:44.380962 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"9add781d-9890-4483-b021-0619d4286428","Type":"ContainerStarted","Data":"848426728d6c1b69533c3b081ef36d51db1b11857336ce992b62b8d581923b36"} Nov 28 11:28:44 crc kubenswrapper[4923]: I1128 11:28:44.382387 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-79797f8fb4-8wx8x" event={"ID":"65fad5c3-977a-4ca4-924a-41cac6143073","Type":"ContainerStarted","Data":"9709c68b32978fa24d08d186ac9d1ba07fd447c012703b0d650f64794a554ae5"} Nov 28 11:28:44 crc kubenswrapper[4923]: I1128 11:28:44.403827 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.814070969 podStartE2EDuration="7.403808649s" podCreationTimestamp="2025-11-28 11:28:37 +0000 UTC" firstStartedPulling="2025-11-28 11:28:38.907106729 +0000 UTC m=+1198.035790939" lastFinishedPulling="2025-11-28 11:28:43.496844409 +0000 UTC m=+1202.625528619" observedRunningTime="2025-11-28 11:28:44.397663747 +0000 UTC m=+1203.526347957" watchObservedRunningTime="2025-11-28 11:28:44.403808649 +0000 UTC m=+1203.532492859" Nov 28 11:28:45 crc kubenswrapper[4923]: I1128 11:28:45.042871 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-659684c4b8-cg62f" Nov 28 11:28:45 crc kubenswrapper[4923]: I1128 11:28:45.084219 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-659684c4b8-cg62f" Nov 28 11:28:45 crc kubenswrapper[4923]: I1128 11:28:45.427566 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-79797f8fb4-8wx8x" event={"ID":"65fad5c3-977a-4ca4-924a-41cac6143073","Type":"ContainerStarted","Data":"672bed71a71100adbee98492067f82bd9226b439c4e8186023ced5e1d189733c"} Nov 28 11:28:45 crc kubenswrapper[4923]: I1128 11:28:45.440590 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"9add781d-9890-4483-b021-0619d4286428","Type":"ContainerStarted","Data":"abc8906794c60076ad6d73558be6f039a931f0c74de08d6feda247569ab405a6"} Nov 28 11:28:46 crc kubenswrapper[4923]: I1128 11:28:46.457632 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"9add781d-9890-4483-b021-0619d4286428","Type":"ContainerStarted","Data":"f6ca0b9ea9c114258fbdd23e17429a7ceabaa9551d268b3474bb8369078e9868"} Nov 28 11:28:46 crc kubenswrapper[4923]: I1128 11:28:46.458117 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Nov 28 11:28:46 crc kubenswrapper[4923]: I1128 
11:28:46.461421 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-79797f8fb4-8wx8x" event={"ID":"65fad5c3-977a-4ca4-924a-41cac6143073","Type":"ContainerStarted","Data":"c9c69b8eddf52002e97efd100da456611abf5fa89901e9beb4e8fd9d5495e4c9"} Nov 28 11:28:46 crc kubenswrapper[4923]: I1128 11:28:46.461595 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-79797f8fb4-8wx8x" Nov 28 11:28:46 crc kubenswrapper[4923]: I1128 11:28:46.461633 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-79797f8fb4-8wx8x" Nov 28 11:28:46 crc kubenswrapper[4923]: I1128 11:28:46.477128 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=4.477108791 podStartE2EDuration="4.477108791s" podCreationTimestamp="2025-11-28 11:28:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:28:46.473302674 +0000 UTC m=+1205.601986884" watchObservedRunningTime="2025-11-28 11:28:46.477108791 +0000 UTC m=+1205.605793011" Nov 28 11:28:46 crc kubenswrapper[4923]: I1128 11:28:46.517869 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Nov 28 11:28:46 crc kubenswrapper[4923]: I1128 11:28:46.789901 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Nov 28 11:28:46 crc kubenswrapper[4923]: I1128 11:28:46.809886 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-79797f8fb4-8wx8x" podStartSLOduration=3.809871277 podStartE2EDuration="3.809871277s" podCreationTimestamp="2025-11-28 11:28:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:28:46.498565252 +0000 UTC m=+1205.627249462" watchObservedRunningTime="2025-11-28 11:28:46.809871277 +0000 UTC m=+1205.938555487" Nov 28 11:28:46 crc kubenswrapper[4923]: I1128 11:28:46.851124 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6d97fcdd8f-vr5qb" Nov 28 11:28:46 crc kubenswrapper[4923]: I1128 11:28:46.941779 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7b946d459c-b9ggp"] Nov 28 11:28:46 crc kubenswrapper[4923]: I1128 11:28:46.942014 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7b946d459c-b9ggp" podUID="73b329c5-451d-4df5-9c61-da9706ee5d52" containerName="dnsmasq-dns" containerID="cri-o://5a0cebbe8280150d56107e83737cca6ba9f9fd5ba02b0fb12f54bc8903de7217" gracePeriod=10 Nov 28 11:28:47 crc kubenswrapper[4923]: I1128 11:28:47.494472 4923 generic.go:334] "Generic (PLEG): container finished" podID="73b329c5-451d-4df5-9c61-da9706ee5d52" containerID="5a0cebbe8280150d56107e83737cca6ba9f9fd5ba02b0fb12f54bc8903de7217" exitCode=0 Nov 28 11:28:47 crc kubenswrapper[4923]: I1128 11:28:47.495456 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7b946d459c-b9ggp" event={"ID":"73b329c5-451d-4df5-9c61-da9706ee5d52","Type":"ContainerDied","Data":"5a0cebbe8280150d56107e83737cca6ba9f9fd5ba02b0fb12f54bc8903de7217"} Nov 28 11:28:47 crc kubenswrapper[4923]: I1128 11:28:47.495478 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7b946d459c-b9ggp" 
event={"ID":"73b329c5-451d-4df5-9c61-da9706ee5d52","Type":"ContainerDied","Data":"2ea29aecce4858eb4c0f0f5c84cb5f126298e8591683f22100c16ba87d1f8055"} Nov 28 11:28:47 crc kubenswrapper[4923]: I1128 11:28:47.495488 4923 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2ea29aecce4858eb4c0f0f5c84cb5f126298e8591683f22100c16ba87d1f8055" Nov 28 11:28:47 crc kubenswrapper[4923]: I1128 11:28:47.531961 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7b946d459c-b9ggp" Nov 28 11:28:47 crc kubenswrapper[4923]: I1128 11:28:47.536952 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 28 11:28:47 crc kubenswrapper[4923]: I1128 11:28:47.683340 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/73b329c5-451d-4df5-9c61-da9706ee5d52-dns-svc\") pod \"73b329c5-451d-4df5-9c61-da9706ee5d52\" (UID: \"73b329c5-451d-4df5-9c61-da9706ee5d52\") " Nov 28 11:28:47 crc kubenswrapper[4923]: I1128 11:28:47.683509 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/73b329c5-451d-4df5-9c61-da9706ee5d52-ovsdbserver-nb\") pod \"73b329c5-451d-4df5-9c61-da9706ee5d52\" (UID: \"73b329c5-451d-4df5-9c61-da9706ee5d52\") " Nov 28 11:28:47 crc kubenswrapper[4923]: I1128 11:28:47.683572 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rgxhd\" (UniqueName: \"kubernetes.io/projected/73b329c5-451d-4df5-9c61-da9706ee5d52-kube-api-access-rgxhd\") pod \"73b329c5-451d-4df5-9c61-da9706ee5d52\" (UID: \"73b329c5-451d-4df5-9c61-da9706ee5d52\") " Nov 28 11:28:47 crc kubenswrapper[4923]: I1128 11:28:47.683658 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/73b329c5-451d-4df5-9c61-da9706ee5d52-config\") pod \"73b329c5-451d-4df5-9c61-da9706ee5d52\" (UID: \"73b329c5-451d-4df5-9c61-da9706ee5d52\") " Nov 28 11:28:47 crc kubenswrapper[4923]: I1128 11:28:47.683717 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/73b329c5-451d-4df5-9c61-da9706ee5d52-ovsdbserver-sb\") pod \"73b329c5-451d-4df5-9c61-da9706ee5d52\" (UID: \"73b329c5-451d-4df5-9c61-da9706ee5d52\") " Nov 28 11:28:47 crc kubenswrapper[4923]: I1128 11:28:47.691152 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/73b329c5-451d-4df5-9c61-da9706ee5d52-kube-api-access-rgxhd" (OuterVolumeSpecName: "kube-api-access-rgxhd") pod "73b329c5-451d-4df5-9c61-da9706ee5d52" (UID: "73b329c5-451d-4df5-9c61-da9706ee5d52"). InnerVolumeSpecName "kube-api-access-rgxhd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:28:47 crc kubenswrapper[4923]: I1128 11:28:47.742087 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/73b329c5-451d-4df5-9c61-da9706ee5d52-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "73b329c5-451d-4df5-9c61-da9706ee5d52" (UID: "73b329c5-451d-4df5-9c61-da9706ee5d52"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:28:47 crc kubenswrapper[4923]: I1128 11:28:47.743205 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/73b329c5-451d-4df5-9c61-da9706ee5d52-config" (OuterVolumeSpecName: "config") pod "73b329c5-451d-4df5-9c61-da9706ee5d52" (UID: "73b329c5-451d-4df5-9c61-da9706ee5d52"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:28:47 crc kubenswrapper[4923]: I1128 11:28:47.760884 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/73b329c5-451d-4df5-9c61-da9706ee5d52-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "73b329c5-451d-4df5-9c61-da9706ee5d52" (UID: "73b329c5-451d-4df5-9c61-da9706ee5d52"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:28:47 crc kubenswrapper[4923]: I1128 11:28:47.781987 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-86dc447b-tv8qs" Nov 28 11:28:47 crc kubenswrapper[4923]: I1128 11:28:47.785880 4923 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/73b329c5-451d-4df5-9c61-da9706ee5d52-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 11:28:47 crc kubenswrapper[4923]: I1128 11:28:47.785979 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rgxhd\" (UniqueName: \"kubernetes.io/projected/73b329c5-451d-4df5-9c61-da9706ee5d52-kube-api-access-rgxhd\") on node \"crc\" DevicePath \"\"" Nov 28 11:28:47 crc kubenswrapper[4923]: I1128 11:28:47.786045 4923 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/73b329c5-451d-4df5-9c61-da9706ee5d52-config\") on node \"crc\" DevicePath \"\"" Nov 28 11:28:47 crc kubenswrapper[4923]: I1128 11:28:47.786101 4923 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/73b329c5-451d-4df5-9c61-da9706ee5d52-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 11:28:47 crc kubenswrapper[4923]: I1128 11:28:47.793435 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-86dc447b-tv8qs" Nov 28 11:28:47 crc kubenswrapper[4923]: I1128 11:28:47.793681 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/73b329c5-451d-4df5-9c61-da9706ee5d52-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "73b329c5-451d-4df5-9c61-da9706ee5d52" (UID: "73b329c5-451d-4df5-9c61-da9706ee5d52"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:28:47 crc kubenswrapper[4923]: I1128 11:28:47.888087 4923 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/73b329c5-451d-4df5-9c61-da9706ee5d52-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 11:28:48 crc kubenswrapper[4923]: I1128 11:28:48.508239 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7b946d459c-b9ggp" Nov 28 11:28:48 crc kubenswrapper[4923]: I1128 11:28:48.512464 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="d769371b-b401-4423-beb7-fb8b0ac9e6e0" containerName="cinder-scheduler" containerID="cri-o://cd387ba196b323fcb98b03ad22c4c6809986f3df428171833b3e67c45acf6d28" gracePeriod=30 Nov 28 11:28:48 crc kubenswrapper[4923]: I1128 11:28:48.512651 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="d769371b-b401-4423-beb7-fb8b0ac9e6e0" containerName="probe" containerID="cri-o://3881df947bd936a2500c8eb33682405b0fb6b873e56b52521f76b8179985fba2" gracePeriod=30 Nov 28 11:28:48 crc kubenswrapper[4923]: I1128 11:28:48.561678 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7b946d459c-b9ggp"] Nov 28 11:28:48 crc kubenswrapper[4923]: I1128 11:28:48.565979 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7b946d459c-b9ggp"] Nov 28 11:28:49 crc kubenswrapper[4923]: I1128 11:28:49.188765 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="73b329c5-451d-4df5-9c61-da9706ee5d52" path="/var/lib/kubelet/pods/73b329c5-451d-4df5-9c61-da9706ee5d52/volumes" Nov 28 11:28:49 crc kubenswrapper[4923]: I1128 11:28:49.307164 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-57876647b-g86pl" Nov 28 11:28:49 crc kubenswrapper[4923]: I1128 11:28:49.414281 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/322dfb9b-c79f-4d58-96eb-265da89196f4-config\") pod \"322dfb9b-c79f-4d58-96eb-265da89196f4\" (UID: \"322dfb9b-c79f-4d58-96eb-265da89196f4\") " Nov 28 11:28:49 crc kubenswrapper[4923]: I1128 11:28:49.414569 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/322dfb9b-c79f-4d58-96eb-265da89196f4-ovndb-tls-certs\") pod \"322dfb9b-c79f-4d58-96eb-265da89196f4\" (UID: \"322dfb9b-c79f-4d58-96eb-265da89196f4\") " Nov 28 11:28:49 crc kubenswrapper[4923]: I1128 11:28:49.414689 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/322dfb9b-c79f-4d58-96eb-265da89196f4-httpd-config\") pod \"322dfb9b-c79f-4d58-96eb-265da89196f4\" (UID: \"322dfb9b-c79f-4d58-96eb-265da89196f4\") " Nov 28 11:28:49 crc kubenswrapper[4923]: I1128 11:28:49.414727 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/322dfb9b-c79f-4d58-96eb-265da89196f4-combined-ca-bundle\") pod \"322dfb9b-c79f-4d58-96eb-265da89196f4\" (UID: \"322dfb9b-c79f-4d58-96eb-265da89196f4\") " Nov 28 11:28:49 crc kubenswrapper[4923]: I1128 11:28:49.414762 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hs6hc\" (UniqueName: \"kubernetes.io/projected/322dfb9b-c79f-4d58-96eb-265da89196f4-kube-api-access-hs6hc\") pod \"322dfb9b-c79f-4d58-96eb-265da89196f4\" (UID: \"322dfb9b-c79f-4d58-96eb-265da89196f4\") " Nov 28 11:28:49 crc kubenswrapper[4923]: I1128 11:28:49.435919 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/322dfb9b-c79f-4d58-96eb-265da89196f4-kube-api-access-hs6hc" (OuterVolumeSpecName: 
"kube-api-access-hs6hc") pod "322dfb9b-c79f-4d58-96eb-265da89196f4" (UID: "322dfb9b-c79f-4d58-96eb-265da89196f4"). InnerVolumeSpecName "kube-api-access-hs6hc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:28:49 crc kubenswrapper[4923]: I1128 11:28:49.449104 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/322dfb9b-c79f-4d58-96eb-265da89196f4-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "322dfb9b-c79f-4d58-96eb-265da89196f4" (UID: "322dfb9b-c79f-4d58-96eb-265da89196f4"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:28:49 crc kubenswrapper[4923]: I1128 11:28:49.497001 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/322dfb9b-c79f-4d58-96eb-265da89196f4-config" (OuterVolumeSpecName: "config") pod "322dfb9b-c79f-4d58-96eb-265da89196f4" (UID: "322dfb9b-c79f-4d58-96eb-265da89196f4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:28:49 crc kubenswrapper[4923]: I1128 11:28:49.508028 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/322dfb9b-c79f-4d58-96eb-265da89196f4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "322dfb9b-c79f-4d58-96eb-265da89196f4" (UID: "322dfb9b-c79f-4d58-96eb-265da89196f4"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:28:49 crc kubenswrapper[4923]: I1128 11:28:49.517736 4923 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/322dfb9b-c79f-4d58-96eb-265da89196f4-httpd-config\") on node \"crc\" DevicePath \"\"" Nov 28 11:28:49 crc kubenswrapper[4923]: I1128 11:28:49.517765 4923 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/322dfb9b-c79f-4d58-96eb-265da89196f4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 11:28:49 crc kubenswrapper[4923]: I1128 11:28:49.517792 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hs6hc\" (UniqueName: \"kubernetes.io/projected/322dfb9b-c79f-4d58-96eb-265da89196f4-kube-api-access-hs6hc\") on node \"crc\" DevicePath \"\"" Nov 28 11:28:49 crc kubenswrapper[4923]: I1128 11:28:49.517802 4923 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/322dfb9b-c79f-4d58-96eb-265da89196f4-config\") on node \"crc\" DevicePath \"\"" Nov 28 11:28:49 crc kubenswrapper[4923]: I1128 11:28:49.527395 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/322dfb9b-c79f-4d58-96eb-265da89196f4-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "322dfb9b-c79f-4d58-96eb-265da89196f4" (UID: "322dfb9b-c79f-4d58-96eb-265da89196f4"). InnerVolumeSpecName "ovndb-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:28:49 crc kubenswrapper[4923]: I1128 11:28:49.528886 4923 generic.go:334] "Generic (PLEG): container finished" podID="322dfb9b-c79f-4d58-96eb-265da89196f4" containerID="4c349486c4d323a0c1525ced697c1addeeda2efe88a583d486bc07c71d95db8a" exitCode=0 Nov 28 11:28:49 crc kubenswrapper[4923]: I1128 11:28:49.529041 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-57876647b-g86pl" event={"ID":"322dfb9b-c79f-4d58-96eb-265da89196f4","Type":"ContainerDied","Data":"4c349486c4d323a0c1525ced697c1addeeda2efe88a583d486bc07c71d95db8a"} Nov 28 11:28:49 crc kubenswrapper[4923]: I1128 11:28:49.529071 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-57876647b-g86pl" Nov 28 11:28:49 crc kubenswrapper[4923]: I1128 11:28:49.529090 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-57876647b-g86pl" event={"ID":"322dfb9b-c79f-4d58-96eb-265da89196f4","Type":"ContainerDied","Data":"acdd6a8cd5329c7edb3150653fd5d12972f4ed6f830cde249bc85955ce13756e"} Nov 28 11:28:49 crc kubenswrapper[4923]: I1128 11:28:49.529107 4923 scope.go:117] "RemoveContainer" containerID="18134142dd802445c28c83de513c0e68371687ade17d40fc5952f5934c47d926" Nov 28 11:28:49 crc kubenswrapper[4923]: I1128 11:28:49.547051 4923 generic.go:334] "Generic (PLEG): container finished" podID="d769371b-b401-4423-beb7-fb8b0ac9e6e0" containerID="3881df947bd936a2500c8eb33682405b0fb6b873e56b52521f76b8179985fba2" exitCode=0 Nov 28 11:28:49 crc kubenswrapper[4923]: I1128 11:28:49.547073 4923 generic.go:334] "Generic (PLEG): container finished" podID="d769371b-b401-4423-beb7-fb8b0ac9e6e0" containerID="cd387ba196b323fcb98b03ad22c4c6809986f3df428171833b3e67c45acf6d28" exitCode=0 Nov 28 11:28:49 crc kubenswrapper[4923]: I1128 11:28:49.547104 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"d769371b-b401-4423-beb7-fb8b0ac9e6e0","Type":"ContainerDied","Data":"3881df947bd936a2500c8eb33682405b0fb6b873e56b52521f76b8179985fba2"} Nov 28 11:28:49 crc kubenswrapper[4923]: I1128 11:28:49.547122 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"d769371b-b401-4423-beb7-fb8b0ac9e6e0","Type":"ContainerDied","Data":"cd387ba196b323fcb98b03ad22c4c6809986f3df428171833b3e67c45acf6d28"} Nov 28 11:28:49 crc kubenswrapper[4923]: I1128 11:28:49.560764 4923 scope.go:117] "RemoveContainer" containerID="4c349486c4d323a0c1525ced697c1addeeda2efe88a583d486bc07c71d95db8a" Nov 28 11:28:49 crc kubenswrapper[4923]: I1128 11:28:49.584390 4923 scope.go:117] "RemoveContainer" containerID="18134142dd802445c28c83de513c0e68371687ade17d40fc5952f5934c47d926" Nov 28 11:28:49 crc kubenswrapper[4923]: E1128 11:28:49.584714 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"18134142dd802445c28c83de513c0e68371687ade17d40fc5952f5934c47d926\": container with ID starting with 18134142dd802445c28c83de513c0e68371687ade17d40fc5952f5934c47d926 not found: ID does not exist" containerID="18134142dd802445c28c83de513c0e68371687ade17d40fc5952f5934c47d926" Nov 28 11:28:49 crc kubenswrapper[4923]: I1128 11:28:49.584756 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"18134142dd802445c28c83de513c0e68371687ade17d40fc5952f5934c47d926"} err="failed to get container status 
\"18134142dd802445c28c83de513c0e68371687ade17d40fc5952f5934c47d926\": rpc error: code = NotFound desc = could not find container \"18134142dd802445c28c83de513c0e68371687ade17d40fc5952f5934c47d926\": container with ID starting with 18134142dd802445c28c83de513c0e68371687ade17d40fc5952f5934c47d926 not found: ID does not exist" Nov 28 11:28:49 crc kubenswrapper[4923]: I1128 11:28:49.584775 4923 scope.go:117] "RemoveContainer" containerID="4c349486c4d323a0c1525ced697c1addeeda2efe88a583d486bc07c71d95db8a" Nov 28 11:28:49 crc kubenswrapper[4923]: E1128 11:28:49.584969 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4c349486c4d323a0c1525ced697c1addeeda2efe88a583d486bc07c71d95db8a\": container with ID starting with 4c349486c4d323a0c1525ced697c1addeeda2efe88a583d486bc07c71d95db8a not found: ID does not exist" containerID="4c349486c4d323a0c1525ced697c1addeeda2efe88a583d486bc07c71d95db8a" Nov 28 11:28:49 crc kubenswrapper[4923]: I1128 11:28:49.584989 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4c349486c4d323a0c1525ced697c1addeeda2efe88a583d486bc07c71d95db8a"} err="failed to get container status \"4c349486c4d323a0c1525ced697c1addeeda2efe88a583d486bc07c71d95db8a\": rpc error: code = NotFound desc = could not find container \"4c349486c4d323a0c1525ced697c1addeeda2efe88a583d486bc07c71d95db8a\": container with ID starting with 4c349486c4d323a0c1525ced697c1addeeda2efe88a583d486bc07c71d95db8a not found: ID does not exist" Nov 28 11:28:49 crc kubenswrapper[4923]: I1128 11:28:49.587525 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-57876647b-g86pl"] Nov 28 11:28:49 crc kubenswrapper[4923]: I1128 11:28:49.592374 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-57876647b-g86pl"] Nov 28 11:28:49 crc kubenswrapper[4923]: I1128 11:28:49.619268 4923 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/322dfb9b-c79f-4d58-96eb-265da89196f4-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 11:28:50 crc kubenswrapper[4923]: I1128 11:28:50.027451 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 28 11:28:50 crc kubenswrapper[4923]: I1128 11:28:50.126403 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vrrkj\" (UniqueName: \"kubernetes.io/projected/d769371b-b401-4423-beb7-fb8b0ac9e6e0-kube-api-access-vrrkj\") pod \"d769371b-b401-4423-beb7-fb8b0ac9e6e0\" (UID: \"d769371b-b401-4423-beb7-fb8b0ac9e6e0\") " Nov 28 11:28:50 crc kubenswrapper[4923]: I1128 11:28:50.126494 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d769371b-b401-4423-beb7-fb8b0ac9e6e0-config-data-custom\") pod \"d769371b-b401-4423-beb7-fb8b0ac9e6e0\" (UID: \"d769371b-b401-4423-beb7-fb8b0ac9e6e0\") " Nov 28 11:28:50 crc kubenswrapper[4923]: I1128 11:28:50.126560 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d769371b-b401-4423-beb7-fb8b0ac9e6e0-scripts\") pod \"d769371b-b401-4423-beb7-fb8b0ac9e6e0\" (UID: \"d769371b-b401-4423-beb7-fb8b0ac9e6e0\") " Nov 28 11:28:50 crc kubenswrapper[4923]: I1128 11:28:50.126593 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d769371b-b401-4423-beb7-fb8b0ac9e6e0-etc-machine-id\") pod \"d769371b-b401-4423-beb7-fb8b0ac9e6e0\" (UID: \"d769371b-b401-4423-beb7-fb8b0ac9e6e0\") " Nov 28 11:28:50 crc kubenswrapper[4923]: I1128 11:28:50.126635 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d769371b-b401-4423-beb7-fb8b0ac9e6e0-config-data\") pod \"d769371b-b401-4423-beb7-fb8b0ac9e6e0\" (UID: \"d769371b-b401-4423-beb7-fb8b0ac9e6e0\") " Nov 28 11:28:50 crc kubenswrapper[4923]: I1128 11:28:50.126692 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d769371b-b401-4423-beb7-fb8b0ac9e6e0-combined-ca-bundle\") pod \"d769371b-b401-4423-beb7-fb8b0ac9e6e0\" (UID: \"d769371b-b401-4423-beb7-fb8b0ac9e6e0\") " Nov 28 11:28:50 crc kubenswrapper[4923]: I1128 11:28:50.126979 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d769371b-b401-4423-beb7-fb8b0ac9e6e0-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "d769371b-b401-4423-beb7-fb8b0ac9e6e0" (UID: "d769371b-b401-4423-beb7-fb8b0ac9e6e0"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 11:28:50 crc kubenswrapper[4923]: I1128 11:28:50.127528 4923 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d769371b-b401-4423-beb7-fb8b0ac9e6e0-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 28 11:28:50 crc kubenswrapper[4923]: I1128 11:28:50.135083 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d769371b-b401-4423-beb7-fb8b0ac9e6e0-kube-api-access-vrrkj" (OuterVolumeSpecName: "kube-api-access-vrrkj") pod "d769371b-b401-4423-beb7-fb8b0ac9e6e0" (UID: "d769371b-b401-4423-beb7-fb8b0ac9e6e0"). InnerVolumeSpecName "kube-api-access-vrrkj". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:28:50 crc kubenswrapper[4923]: I1128 11:28:50.135343 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d769371b-b401-4423-beb7-fb8b0ac9e6e0-scripts" (OuterVolumeSpecName: "scripts") pod "d769371b-b401-4423-beb7-fb8b0ac9e6e0" (UID: "d769371b-b401-4423-beb7-fb8b0ac9e6e0"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:28:50 crc kubenswrapper[4923]: I1128 11:28:50.142054 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d769371b-b401-4423-beb7-fb8b0ac9e6e0-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "d769371b-b401-4423-beb7-fb8b0ac9e6e0" (UID: "d769371b-b401-4423-beb7-fb8b0ac9e6e0"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:28:50 crc kubenswrapper[4923]: I1128 11:28:50.229746 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vrrkj\" (UniqueName: \"kubernetes.io/projected/d769371b-b401-4423-beb7-fb8b0ac9e6e0-kube-api-access-vrrkj\") on node \"crc\" DevicePath \"\"" Nov 28 11:28:50 crc kubenswrapper[4923]: I1128 11:28:50.230069 4923 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d769371b-b401-4423-beb7-fb8b0ac9e6e0-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 28 11:28:50 crc kubenswrapper[4923]: I1128 11:28:50.230079 4923 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d769371b-b401-4423-beb7-fb8b0ac9e6e0-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 11:28:50 crc kubenswrapper[4923]: I1128 11:28:50.235061 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d769371b-b401-4423-beb7-fb8b0ac9e6e0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d769371b-b401-4423-beb7-fb8b0ac9e6e0" (UID: "d769371b-b401-4423-beb7-fb8b0ac9e6e0"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:28:50 crc kubenswrapper[4923]: I1128 11:28:50.258255 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d769371b-b401-4423-beb7-fb8b0ac9e6e0-config-data" (OuterVolumeSpecName: "config-data") pod "d769371b-b401-4423-beb7-fb8b0ac9e6e0" (UID: "d769371b-b401-4423-beb7-fb8b0ac9e6e0"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:28:50 crc kubenswrapper[4923]: I1128 11:28:50.332347 4923 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d769371b-b401-4423-beb7-fb8b0ac9e6e0-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 11:28:50 crc kubenswrapper[4923]: I1128 11:28:50.332402 4923 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d769371b-b401-4423-beb7-fb8b0ac9e6e0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 11:28:50 crc kubenswrapper[4923]: E1128 11:28:50.408452 4923 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0577bf3a_49d8_4540_92f3_fa1703570c2d.slice/crio-conmon-1064fe83c605861b84c06036c63c49d98500fec804f45b7a55d9c8633a752af9.scope\": RecentStats: unable to find data in memory cache]" Nov 28 11:28:50 crc kubenswrapper[4923]: I1128 11:28:50.560588 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"d769371b-b401-4423-beb7-fb8b0ac9e6e0","Type":"ContainerDied","Data":"69a9e7b824e0ab0f83e0f89dba19aa97a140a10c293b3616ab68d13d10ab924a"} Nov 28 11:28:50 crc kubenswrapper[4923]: I1128 11:28:50.561170 4923 scope.go:117] "RemoveContainer" containerID="3881df947bd936a2500c8eb33682405b0fb6b873e56b52521f76b8179985fba2" Nov 28 11:28:50 crc kubenswrapper[4923]: I1128 11:28:50.561141 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 28 11:28:50 crc kubenswrapper[4923]: I1128 11:28:50.598085 4923 scope.go:117] "RemoveContainer" containerID="cd387ba196b323fcb98b03ad22c4c6809986f3df428171833b3e67c45acf6d28" Nov 28 11:28:50 crc kubenswrapper[4923]: I1128 11:28:50.628364 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 28 11:28:50 crc kubenswrapper[4923]: I1128 11:28:50.644202 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 28 11:28:50 crc kubenswrapper[4923]: I1128 11:28:50.657993 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Nov 28 11:28:50 crc kubenswrapper[4923]: E1128 11:28:50.658339 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d769371b-b401-4423-beb7-fb8b0ac9e6e0" containerName="probe" Nov 28 11:28:50 crc kubenswrapper[4923]: I1128 11:28:50.658353 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="d769371b-b401-4423-beb7-fb8b0ac9e6e0" containerName="probe" Nov 28 11:28:50 crc kubenswrapper[4923]: E1128 11:28:50.658362 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="73b329c5-451d-4df5-9c61-da9706ee5d52" containerName="dnsmasq-dns" Nov 28 11:28:50 crc kubenswrapper[4923]: I1128 11:28:50.658370 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="73b329c5-451d-4df5-9c61-da9706ee5d52" containerName="dnsmasq-dns" Nov 28 11:28:50 crc kubenswrapper[4923]: E1128 11:28:50.658380 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="73b329c5-451d-4df5-9c61-da9706ee5d52" containerName="init" Nov 28 11:28:50 crc kubenswrapper[4923]: I1128 11:28:50.658386 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="73b329c5-451d-4df5-9c61-da9706ee5d52" containerName="init" Nov 28 11:28:50 crc kubenswrapper[4923]: E1128 11:28:50.658403 4923 cpu_manager.go:410] "RemoveStaleState: 
removing container" podUID="322dfb9b-c79f-4d58-96eb-265da89196f4" containerName="neutron-httpd" Nov 28 11:28:50 crc kubenswrapper[4923]: I1128 11:28:50.658408 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="322dfb9b-c79f-4d58-96eb-265da89196f4" containerName="neutron-httpd" Nov 28 11:28:50 crc kubenswrapper[4923]: E1128 11:28:50.658416 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="322dfb9b-c79f-4d58-96eb-265da89196f4" containerName="neutron-api" Nov 28 11:28:50 crc kubenswrapper[4923]: I1128 11:28:50.658421 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="322dfb9b-c79f-4d58-96eb-265da89196f4" containerName="neutron-api" Nov 28 11:28:50 crc kubenswrapper[4923]: E1128 11:28:50.658430 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d769371b-b401-4423-beb7-fb8b0ac9e6e0" containerName="cinder-scheduler" Nov 28 11:28:50 crc kubenswrapper[4923]: I1128 11:28:50.658435 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="d769371b-b401-4423-beb7-fb8b0ac9e6e0" containerName="cinder-scheduler" Nov 28 11:28:50 crc kubenswrapper[4923]: I1128 11:28:50.658574 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="d769371b-b401-4423-beb7-fb8b0ac9e6e0" containerName="probe" Nov 28 11:28:50 crc kubenswrapper[4923]: I1128 11:28:50.658584 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="d769371b-b401-4423-beb7-fb8b0ac9e6e0" containerName="cinder-scheduler" Nov 28 11:28:50 crc kubenswrapper[4923]: I1128 11:28:50.658596 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="322dfb9b-c79f-4d58-96eb-265da89196f4" containerName="neutron-api" Nov 28 11:28:50 crc kubenswrapper[4923]: I1128 11:28:50.658607 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="322dfb9b-c79f-4d58-96eb-265da89196f4" containerName="neutron-httpd" Nov 28 11:28:50 crc kubenswrapper[4923]: I1128 11:28:50.658624 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="73b329c5-451d-4df5-9c61-da9706ee5d52" containerName="dnsmasq-dns" Nov 28 11:28:50 crc kubenswrapper[4923]: I1128 11:28:50.659428 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 28 11:28:50 crc kubenswrapper[4923]: I1128 11:28:50.663510 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Nov 28 11:28:50 crc kubenswrapper[4923]: I1128 11:28:50.681159 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 28 11:28:50 crc kubenswrapper[4923]: I1128 11:28:50.740498 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7de6eb3a-4c44-4ac8-b61e-f14bc1a3cfe6-scripts\") pod \"cinder-scheduler-0\" (UID: \"7de6eb3a-4c44-4ac8-b61e-f14bc1a3cfe6\") " pod="openstack/cinder-scheduler-0" Nov 28 11:28:50 crc kubenswrapper[4923]: I1128 11:28:50.740578 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7de6eb3a-4c44-4ac8-b61e-f14bc1a3cfe6-config-data\") pod \"cinder-scheduler-0\" (UID: \"7de6eb3a-4c44-4ac8-b61e-f14bc1a3cfe6\") " pod="openstack/cinder-scheduler-0" Nov 28 11:28:50 crc kubenswrapper[4923]: I1128 11:28:50.740631 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7de6eb3a-4c44-4ac8-b61e-f14bc1a3cfe6-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"7de6eb3a-4c44-4ac8-b61e-f14bc1a3cfe6\") " pod="openstack/cinder-scheduler-0" Nov 28 11:28:50 crc kubenswrapper[4923]: I1128 11:28:50.740646 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/7de6eb3a-4c44-4ac8-b61e-f14bc1a3cfe6-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"7de6eb3a-4c44-4ac8-b61e-f14bc1a3cfe6\") " pod="openstack/cinder-scheduler-0" Nov 28 11:28:50 crc kubenswrapper[4923]: I1128 11:28:50.740756 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z6ltd\" (UniqueName: \"kubernetes.io/projected/7de6eb3a-4c44-4ac8-b61e-f14bc1a3cfe6-kube-api-access-z6ltd\") pod \"cinder-scheduler-0\" (UID: \"7de6eb3a-4c44-4ac8-b61e-f14bc1a3cfe6\") " pod="openstack/cinder-scheduler-0" Nov 28 11:28:50 crc kubenswrapper[4923]: I1128 11:28:50.740802 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/7de6eb3a-4c44-4ac8-b61e-f14bc1a3cfe6-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"7de6eb3a-4c44-4ac8-b61e-f14bc1a3cfe6\") " pod="openstack/cinder-scheduler-0" Nov 28 11:28:50 crc kubenswrapper[4923]: I1128 11:28:50.842449 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z6ltd\" (UniqueName: \"kubernetes.io/projected/7de6eb3a-4c44-4ac8-b61e-f14bc1a3cfe6-kube-api-access-z6ltd\") pod \"cinder-scheduler-0\" (UID: \"7de6eb3a-4c44-4ac8-b61e-f14bc1a3cfe6\") " pod="openstack/cinder-scheduler-0" Nov 28 11:28:50 crc kubenswrapper[4923]: I1128 11:28:50.842510 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/7de6eb3a-4c44-4ac8-b61e-f14bc1a3cfe6-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"7de6eb3a-4c44-4ac8-b61e-f14bc1a3cfe6\") " pod="openstack/cinder-scheduler-0" Nov 28 11:28:50 crc kubenswrapper[4923]: I1128 11:28:50.842598 4923 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7de6eb3a-4c44-4ac8-b61e-f14bc1a3cfe6-scripts\") pod \"cinder-scheduler-0\" (UID: \"7de6eb3a-4c44-4ac8-b61e-f14bc1a3cfe6\") " pod="openstack/cinder-scheduler-0" Nov 28 11:28:50 crc kubenswrapper[4923]: I1128 11:28:50.842622 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7de6eb3a-4c44-4ac8-b61e-f14bc1a3cfe6-config-data\") pod \"cinder-scheduler-0\" (UID: \"7de6eb3a-4c44-4ac8-b61e-f14bc1a3cfe6\") " pod="openstack/cinder-scheduler-0" Nov 28 11:28:50 crc kubenswrapper[4923]: I1128 11:28:50.842674 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7de6eb3a-4c44-4ac8-b61e-f14bc1a3cfe6-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"7de6eb3a-4c44-4ac8-b61e-f14bc1a3cfe6\") " pod="openstack/cinder-scheduler-0" Nov 28 11:28:50 crc kubenswrapper[4923]: I1128 11:28:50.842686 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/7de6eb3a-4c44-4ac8-b61e-f14bc1a3cfe6-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"7de6eb3a-4c44-4ac8-b61e-f14bc1a3cfe6\") " pod="openstack/cinder-scheduler-0" Nov 28 11:28:50 crc kubenswrapper[4923]: I1128 11:28:50.842694 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/7de6eb3a-4c44-4ac8-b61e-f14bc1a3cfe6-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"7de6eb3a-4c44-4ac8-b61e-f14bc1a3cfe6\") " pod="openstack/cinder-scheduler-0" Nov 28 11:28:50 crc kubenswrapper[4923]: I1128 11:28:50.847832 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7de6eb3a-4c44-4ac8-b61e-f14bc1a3cfe6-config-data\") pod \"cinder-scheduler-0\" (UID: \"7de6eb3a-4c44-4ac8-b61e-f14bc1a3cfe6\") " pod="openstack/cinder-scheduler-0" Nov 28 11:28:50 crc kubenswrapper[4923]: I1128 11:28:50.851631 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/7de6eb3a-4c44-4ac8-b61e-f14bc1a3cfe6-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"7de6eb3a-4c44-4ac8-b61e-f14bc1a3cfe6\") " pod="openstack/cinder-scheduler-0" Nov 28 11:28:50 crc kubenswrapper[4923]: I1128 11:28:50.857599 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7de6eb3a-4c44-4ac8-b61e-f14bc1a3cfe6-scripts\") pod \"cinder-scheduler-0\" (UID: \"7de6eb3a-4c44-4ac8-b61e-f14bc1a3cfe6\") " pod="openstack/cinder-scheduler-0" Nov 28 11:28:50 crc kubenswrapper[4923]: I1128 11:28:50.861391 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7de6eb3a-4c44-4ac8-b61e-f14bc1a3cfe6-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"7de6eb3a-4c44-4ac8-b61e-f14bc1a3cfe6\") " pod="openstack/cinder-scheduler-0" Nov 28 11:28:50 crc kubenswrapper[4923]: I1128 11:28:50.862493 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z6ltd\" (UniqueName: \"kubernetes.io/projected/7de6eb3a-4c44-4ac8-b61e-f14bc1a3cfe6-kube-api-access-z6ltd\") pod \"cinder-scheduler-0\" (UID: \"7de6eb3a-4c44-4ac8-b61e-f14bc1a3cfe6\") " 
pod="openstack/cinder-scheduler-0" Nov 28 11:28:50 crc kubenswrapper[4923]: I1128 11:28:50.977749 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 28 11:28:51 crc kubenswrapper[4923]: I1128 11:28:51.048420 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-698b96f5d-97vsv" Nov 28 11:28:51 crc kubenswrapper[4923]: I1128 11:28:51.189920 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="322dfb9b-c79f-4d58-96eb-265da89196f4" path="/var/lib/kubelet/pods/322dfb9b-c79f-4d58-96eb-265da89196f4/volumes" Nov 28 11:28:51 crc kubenswrapper[4923]: I1128 11:28:51.190688 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d769371b-b401-4423-beb7-fb8b0ac9e6e0" path="/var/lib/kubelet/pods/d769371b-b401-4423-beb7-fb8b0ac9e6e0/volumes" Nov 28 11:28:51 crc kubenswrapper[4923]: I1128 11:28:51.534334 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 28 11:28:51 crc kubenswrapper[4923]: I1128 11:28:51.571281 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"7de6eb3a-4c44-4ac8-b61e-f14bc1a3cfe6","Type":"ContainerStarted","Data":"f2e843bf82126b2d17c13d8620020a9057f439cd437ba57ee56330a4e591a099"} Nov 28 11:28:52 crc kubenswrapper[4923]: I1128 11:28:52.008989 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Nov 28 11:28:52 crc kubenswrapper[4923]: I1128 11:28:52.010194 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 28 11:28:52 crc kubenswrapper[4923]: I1128 11:28:52.014129 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-dbsfk" Nov 28 11:28:52 crc kubenswrapper[4923]: I1128 11:28:52.014382 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret" Nov 28 11:28:52 crc kubenswrapper[4923]: I1128 11:28:52.014810 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config" Nov 28 11:28:52 crc kubenswrapper[4923]: I1128 11:28:52.020398 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 28 11:28:52 crc kubenswrapper[4923]: I1128 11:28:52.064762 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/dac93e05-77d1-4f77-8616-b47fee165189-openstack-config\") pod \"openstackclient\" (UID: \"dac93e05-77d1-4f77-8616-b47fee165189\") " pod="openstack/openstackclient" Nov 28 11:28:52 crc kubenswrapper[4923]: I1128 11:28:52.064833 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b7n96\" (UniqueName: \"kubernetes.io/projected/dac93e05-77d1-4f77-8616-b47fee165189-kube-api-access-b7n96\") pod \"openstackclient\" (UID: \"dac93e05-77d1-4f77-8616-b47fee165189\") " pod="openstack/openstackclient" Nov 28 11:28:52 crc kubenswrapper[4923]: I1128 11:28:52.064887 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dac93e05-77d1-4f77-8616-b47fee165189-combined-ca-bundle\") pod \"openstackclient\" (UID: \"dac93e05-77d1-4f77-8616-b47fee165189\") " pod="openstack/openstackclient" Nov 28 11:28:52 crc kubenswrapper[4923]: 
I1128 11:28:52.064919 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/dac93e05-77d1-4f77-8616-b47fee165189-openstack-config-secret\") pod \"openstackclient\" (UID: \"dac93e05-77d1-4f77-8616-b47fee165189\") " pod="openstack/openstackclient" Nov 28 11:28:52 crc kubenswrapper[4923]: I1128 11:28:52.166626 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dac93e05-77d1-4f77-8616-b47fee165189-combined-ca-bundle\") pod \"openstackclient\" (UID: \"dac93e05-77d1-4f77-8616-b47fee165189\") " pod="openstack/openstackclient" Nov 28 11:28:52 crc kubenswrapper[4923]: I1128 11:28:52.166692 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/dac93e05-77d1-4f77-8616-b47fee165189-openstack-config-secret\") pod \"openstackclient\" (UID: \"dac93e05-77d1-4f77-8616-b47fee165189\") " pod="openstack/openstackclient" Nov 28 11:28:52 crc kubenswrapper[4923]: I1128 11:28:52.166744 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/dac93e05-77d1-4f77-8616-b47fee165189-openstack-config\") pod \"openstackclient\" (UID: \"dac93e05-77d1-4f77-8616-b47fee165189\") " pod="openstack/openstackclient" Nov 28 11:28:52 crc kubenswrapper[4923]: I1128 11:28:52.166788 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b7n96\" (UniqueName: \"kubernetes.io/projected/dac93e05-77d1-4f77-8616-b47fee165189-kube-api-access-b7n96\") pod \"openstackclient\" (UID: \"dac93e05-77d1-4f77-8616-b47fee165189\") " pod="openstack/openstackclient" Nov 28 11:28:52 crc kubenswrapper[4923]: I1128 11:28:52.168968 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/dac93e05-77d1-4f77-8616-b47fee165189-openstack-config\") pod \"openstackclient\" (UID: \"dac93e05-77d1-4f77-8616-b47fee165189\") " pod="openstack/openstackclient" Nov 28 11:28:52 crc kubenswrapper[4923]: I1128 11:28:52.174562 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/dac93e05-77d1-4f77-8616-b47fee165189-openstack-config-secret\") pod \"openstackclient\" (UID: \"dac93e05-77d1-4f77-8616-b47fee165189\") " pod="openstack/openstackclient" Nov 28 11:28:52 crc kubenswrapper[4923]: I1128 11:28:52.187038 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dac93e05-77d1-4f77-8616-b47fee165189-combined-ca-bundle\") pod \"openstackclient\" (UID: \"dac93e05-77d1-4f77-8616-b47fee165189\") " pod="openstack/openstackclient" Nov 28 11:28:52 crc kubenswrapper[4923]: I1128 11:28:52.203233 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b7n96\" (UniqueName: \"kubernetes.io/projected/dac93e05-77d1-4f77-8616-b47fee165189-kube-api-access-b7n96\") pod \"openstackclient\" (UID: \"dac93e05-77d1-4f77-8616-b47fee165189\") " pod="openstack/openstackclient" Nov 28 11:28:52 crc kubenswrapper[4923]: I1128 11:28:52.331257 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Nov 28 11:28:52 crc kubenswrapper[4923]: I1128 11:28:52.613229 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"7de6eb3a-4c44-4ac8-b61e-f14bc1a3cfe6","Type":"ContainerStarted","Data":"9da9f703cd16f3afe09d5a1256122f6a4d8f4ad1bb343b47e97df7fe0b7821b5"} Nov 28 11:28:52 crc kubenswrapper[4923]: I1128 11:28:52.927410 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 28 11:28:52 crc kubenswrapper[4923]: W1128 11:28:52.958109 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddac93e05_77d1_4f77_8616_b47fee165189.slice/crio-c93f726577d2594a094b73b6edd1782221c81f64ea4d7b70a7c689c94d0549e0 WatchSource:0}: Error finding container c93f726577d2594a094b73b6edd1782221c81f64ea4d7b70a7c689c94d0549e0: Status 404 returned error can't find the container with id c93f726577d2594a094b73b6edd1782221c81f64ea4d7b70a7c689c94d0549e0 Nov 28 11:28:53 crc kubenswrapper[4923]: I1128 11:28:53.623705 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"7de6eb3a-4c44-4ac8-b61e-f14bc1a3cfe6","Type":"ContainerStarted","Data":"25d9cef263fad2e08b45ab57e0990fb9e3a54ca17e6ced9ccef087408bee2c79"} Nov 28 11:28:53 crc kubenswrapper[4923]: I1128 11:28:53.627314 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"dac93e05-77d1-4f77-8616-b47fee165189","Type":"ContainerStarted","Data":"c93f726577d2594a094b73b6edd1782221c81f64ea4d7b70a7c689c94d0549e0"} Nov 28 11:28:55 crc kubenswrapper[4923]: I1128 11:28:55.811089 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-79797f8fb4-8wx8x" Nov 28 11:28:55 crc kubenswrapper[4923]: I1128 11:28:55.834985 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=5.834970323 podStartE2EDuration="5.834970323s" podCreationTimestamp="2025-11-28 11:28:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:28:53.656152528 +0000 UTC m=+1212.784836738" watchObservedRunningTime="2025-11-28 11:28:55.834970323 +0000 UTC m=+1214.963654533" Nov 28 11:28:55 crc kubenswrapper[4923]: I1128 11:28:55.919523 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Nov 28 11:28:55 crc kubenswrapper[4923]: I1128 11:28:55.978543 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Nov 28 11:28:56 crc kubenswrapper[4923]: I1128 11:28:56.004484 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-79797f8fb4-8wx8x" Nov 28 11:28:56 crc kubenswrapper[4923]: I1128 11:28:56.071672 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-86dc447b-tv8qs"] Nov 28 11:28:56 crc kubenswrapper[4923]: I1128 11:28:56.071874 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-86dc447b-tv8qs" podUID="216d256f-0479-4b10-bf92-f59aef3136bc" containerName="barbican-api-log" containerID="cri-o://1182aaafabe027a8ccbdf4064f9d7a64ba5036316d416faf774b1d223fabd706" gracePeriod=30 Nov 28 11:28:56 crc kubenswrapper[4923]: I1128 11:28:56.072142 4923 kuberuntime_container.go:808] 
"Killing container with a grace period" pod="openstack/barbican-api-86dc447b-tv8qs" podUID="216d256f-0479-4b10-bf92-f59aef3136bc" containerName="barbican-api" containerID="cri-o://4ad8877683db2431c87adaea4885f969af2b0eeb49a3d655c2a2c8ffd67d706c" gracePeriod=30 Nov 28 11:28:56 crc kubenswrapper[4923]: I1128 11:28:56.666188 4923 generic.go:334] "Generic (PLEG): container finished" podID="216d256f-0479-4b10-bf92-f59aef3136bc" containerID="1182aaafabe027a8ccbdf4064f9d7a64ba5036316d416faf774b1d223fabd706" exitCode=143 Nov 28 11:28:56 crc kubenswrapper[4923]: I1128 11:28:56.666371 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-86dc447b-tv8qs" event={"ID":"216d256f-0479-4b10-bf92-f59aef3136bc","Type":"ContainerDied","Data":"1182aaafabe027a8ccbdf4064f9d7a64ba5036316d416faf774b1d223fabd706"} Nov 28 11:28:59 crc kubenswrapper[4923]: I1128 11:28:59.711684 4923 generic.go:334] "Generic (PLEG): container finished" podID="216d256f-0479-4b10-bf92-f59aef3136bc" containerID="4ad8877683db2431c87adaea4885f969af2b0eeb49a3d655c2a2c8ffd67d706c" exitCode=0 Nov 28 11:28:59 crc kubenswrapper[4923]: I1128 11:28:59.711857 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-86dc447b-tv8qs" event={"ID":"216d256f-0479-4b10-bf92-f59aef3136bc","Type":"ContainerDied","Data":"4ad8877683db2431c87adaea4885f969af2b0eeb49a3d655c2a2c8ffd67d706c"} Nov 28 11:29:00 crc kubenswrapper[4923]: I1128 11:29:00.297090 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 11:29:00 crc kubenswrapper[4923]: I1128 11:29:00.297660 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="9bafb2b1-e8b0-458d-a6c9-e18b31b73c53" containerName="ceilometer-central-agent" containerID="cri-o://88f0dac3bd9080ba61e7e1b7e5bf76c7b362aaedeb7246a4b2430b69948f1267" gracePeriod=30 Nov 28 11:29:00 crc kubenswrapper[4923]: I1128 11:29:00.297844 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="9bafb2b1-e8b0-458d-a6c9-e18b31b73c53" containerName="proxy-httpd" containerID="cri-o://77adc24acbcb0229afdd02b080757f3a4dea527e5b845143c940753923dc966f" gracePeriod=30 Nov 28 11:29:00 crc kubenswrapper[4923]: I1128 11:29:00.298027 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="9bafb2b1-e8b0-458d-a6c9-e18b31b73c53" containerName="sg-core" containerID="cri-o://2c204ba557e5abdb2905d400903dded7f672076feeb64a482b08343ea02647e9" gracePeriod=30 Nov 28 11:29:00 crc kubenswrapper[4923]: I1128 11:29:00.298129 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="9bafb2b1-e8b0-458d-a6c9-e18b31b73c53" containerName="ceilometer-notification-agent" containerID="cri-o://1a8b5e6bab63dbf6cc77462ad4260a1dd4d3c3fb3142df51aac01e60157e1d7e" gracePeriod=30 Nov 28 11:29:00 crc kubenswrapper[4923]: I1128 11:29:00.314333 4923 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="9bafb2b1-e8b0-458d-a6c9-e18b31b73c53" containerName="proxy-httpd" probeResult="failure" output="Get \"http://10.217.0.149:3000/\": EOF" Nov 28 11:29:00 crc kubenswrapper[4923]: E1128 11:29:00.669215 4923 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: 
[\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0577bf3a_49d8_4540_92f3_fa1703570c2d.slice/crio-conmon-1064fe83c605861b84c06036c63c49d98500fec804f45b7a55d9c8633a752af9.scope\": RecentStats: unable to find data in memory cache]" Nov 28 11:29:00 crc kubenswrapper[4923]: I1128 11:29:00.715050 4923 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-86dc447b-tv8qs" podUID="216d256f-0479-4b10-bf92-f59aef3136bc" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.145:9311/healthcheck\": dial tcp 10.217.0.145:9311: connect: connection refused" Nov 28 11:29:00 crc kubenswrapper[4923]: I1128 11:29:00.717268 4923 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-86dc447b-tv8qs" podUID="216d256f-0479-4b10-bf92-f59aef3136bc" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.145:9311/healthcheck\": dial tcp 10.217.0.145:9311: connect: connection refused" Nov 28 11:29:00 crc kubenswrapper[4923]: I1128 11:29:00.728348 4923 generic.go:334] "Generic (PLEG): container finished" podID="9bafb2b1-e8b0-458d-a6c9-e18b31b73c53" containerID="77adc24acbcb0229afdd02b080757f3a4dea527e5b845143c940753923dc966f" exitCode=0 Nov 28 11:29:00 crc kubenswrapper[4923]: I1128 11:29:00.728610 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9bafb2b1-e8b0-458d-a6c9-e18b31b73c53","Type":"ContainerDied","Data":"77adc24acbcb0229afdd02b080757f3a4dea527e5b845143c940753923dc966f"} Nov 28 11:29:00 crc kubenswrapper[4923]: I1128 11:29:00.728709 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9bafb2b1-e8b0-458d-a6c9-e18b31b73c53","Type":"ContainerDied","Data":"2c204ba557e5abdb2905d400903dded7f672076feeb64a482b08343ea02647e9"} Nov 28 11:29:00 crc kubenswrapper[4923]: I1128 11:29:00.728659 4923 generic.go:334] "Generic (PLEG): container finished" podID="9bafb2b1-e8b0-458d-a6c9-e18b31b73c53" containerID="2c204ba557e5abdb2905d400903dded7f672076feeb64a482b08343ea02647e9" exitCode=2 Nov 28 11:29:00 crc kubenswrapper[4923]: I1128 11:29:00.728733 4923 generic.go:334] "Generic (PLEG): container finished" podID="9bafb2b1-e8b0-458d-a6c9-e18b31b73c53" containerID="88f0dac3bd9080ba61e7e1b7e5bf76c7b362aaedeb7246a4b2430b69948f1267" exitCode=0 Nov 28 11:29:00 crc kubenswrapper[4923]: I1128 11:29:00.728756 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9bafb2b1-e8b0-458d-a6c9-e18b31b73c53","Type":"ContainerDied","Data":"88f0dac3bd9080ba61e7e1b7e5bf76c7b362aaedeb7246a4b2430b69948f1267"} Nov 28 11:29:01 crc kubenswrapper[4923]: I1128 11:29:01.303138 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Nov 28 11:29:01 crc kubenswrapper[4923]: I1128 11:29:01.740333 4923 generic.go:334] "Generic (PLEG): container finished" podID="9bafb2b1-e8b0-458d-a6c9-e18b31b73c53" containerID="1a8b5e6bab63dbf6cc77462ad4260a1dd4d3c3fb3142df51aac01e60157e1d7e" exitCode=0 Nov 28 11:29:01 crc kubenswrapper[4923]: I1128 11:29:01.740371 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9bafb2b1-e8b0-458d-a6c9-e18b31b73c53","Type":"ContainerDied","Data":"1a8b5e6bab63dbf6cc77462ad4260a1dd4d3c3fb3142df51aac01e60157e1d7e"} Nov 28 11:29:04 crc kubenswrapper[4923]: I1128 11:29:04.996689 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-86dc447b-tv8qs" Nov 28 11:29:05 crc kubenswrapper[4923]: I1128 11:29:05.061272 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 11:29:05 crc kubenswrapper[4923]: I1128 11:29:05.101858 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/216d256f-0479-4b10-bf92-f59aef3136bc-config-data\") pod \"216d256f-0479-4b10-bf92-f59aef3136bc\" (UID: \"216d256f-0479-4b10-bf92-f59aef3136bc\") " Nov 28 11:29:05 crc kubenswrapper[4923]: I1128 11:29:05.101920 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jlxr6\" (UniqueName: \"kubernetes.io/projected/216d256f-0479-4b10-bf92-f59aef3136bc-kube-api-access-jlxr6\") pod \"216d256f-0479-4b10-bf92-f59aef3136bc\" (UID: \"216d256f-0479-4b10-bf92-f59aef3136bc\") " Nov 28 11:29:05 crc kubenswrapper[4923]: I1128 11:29:05.101985 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/216d256f-0479-4b10-bf92-f59aef3136bc-config-data-custom\") pod \"216d256f-0479-4b10-bf92-f59aef3136bc\" (UID: \"216d256f-0479-4b10-bf92-f59aef3136bc\") " Nov 28 11:29:05 crc kubenswrapper[4923]: I1128 11:29:05.102023 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/216d256f-0479-4b10-bf92-f59aef3136bc-logs\") pod \"216d256f-0479-4b10-bf92-f59aef3136bc\" (UID: \"216d256f-0479-4b10-bf92-f59aef3136bc\") " Nov 28 11:29:05 crc kubenswrapper[4923]: I1128 11:29:05.102128 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/216d256f-0479-4b10-bf92-f59aef3136bc-combined-ca-bundle\") pod \"216d256f-0479-4b10-bf92-f59aef3136bc\" (UID: \"216d256f-0479-4b10-bf92-f59aef3136bc\") " Nov 28 11:29:05 crc kubenswrapper[4923]: I1128 11:29:05.107309 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/216d256f-0479-4b10-bf92-f59aef3136bc-logs" (OuterVolumeSpecName: "logs") pod "216d256f-0479-4b10-bf92-f59aef3136bc" (UID: "216d256f-0479-4b10-bf92-f59aef3136bc"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:29:05 crc kubenswrapper[4923]: I1128 11:29:05.108609 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/216d256f-0479-4b10-bf92-f59aef3136bc-kube-api-access-jlxr6" (OuterVolumeSpecName: "kube-api-access-jlxr6") pod "216d256f-0479-4b10-bf92-f59aef3136bc" (UID: "216d256f-0479-4b10-bf92-f59aef3136bc"). InnerVolumeSpecName "kube-api-access-jlxr6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:29:05 crc kubenswrapper[4923]: I1128 11:29:05.111156 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/216d256f-0479-4b10-bf92-f59aef3136bc-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "216d256f-0479-4b10-bf92-f59aef3136bc" (UID: "216d256f-0479-4b10-bf92-f59aef3136bc"). InnerVolumeSpecName "config-data-custom". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:29:05 crc kubenswrapper[4923]: I1128 11:29:05.123442 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/216d256f-0479-4b10-bf92-f59aef3136bc-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "216d256f-0479-4b10-bf92-f59aef3136bc" (UID: "216d256f-0479-4b10-bf92-f59aef3136bc"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:29:05 crc kubenswrapper[4923]: I1128 11:29:05.143466 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/216d256f-0479-4b10-bf92-f59aef3136bc-config-data" (OuterVolumeSpecName: "config-data") pod "216d256f-0479-4b10-bf92-f59aef3136bc" (UID: "216d256f-0479-4b10-bf92-f59aef3136bc"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:29:05 crc kubenswrapper[4923]: I1128 11:29:05.204001 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9bafb2b1-e8b0-458d-a6c9-e18b31b73c53-log-httpd\") pod \"9bafb2b1-e8b0-458d-a6c9-e18b31b73c53\" (UID: \"9bafb2b1-e8b0-458d-a6c9-e18b31b73c53\") " Nov 28 11:29:05 crc kubenswrapper[4923]: I1128 11:29:05.204062 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9bafb2b1-e8b0-458d-a6c9-e18b31b73c53-combined-ca-bundle\") pod \"9bafb2b1-e8b0-458d-a6c9-e18b31b73c53\" (UID: \"9bafb2b1-e8b0-458d-a6c9-e18b31b73c53\") " Nov 28 11:29:05 crc kubenswrapper[4923]: I1128 11:29:05.204094 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9bafb2b1-e8b0-458d-a6c9-e18b31b73c53-sg-core-conf-yaml\") pod \"9bafb2b1-e8b0-458d-a6c9-e18b31b73c53\" (UID: \"9bafb2b1-e8b0-458d-a6c9-e18b31b73c53\") " Nov 28 11:29:05 crc kubenswrapper[4923]: I1128 11:29:05.204123 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9bafb2b1-e8b0-458d-a6c9-e18b31b73c53-scripts\") pod \"9bafb2b1-e8b0-458d-a6c9-e18b31b73c53\" (UID: \"9bafb2b1-e8b0-458d-a6c9-e18b31b73c53\") " Nov 28 11:29:05 crc kubenswrapper[4923]: I1128 11:29:05.204151 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tbzlk\" (UniqueName: \"kubernetes.io/projected/9bafb2b1-e8b0-458d-a6c9-e18b31b73c53-kube-api-access-tbzlk\") pod \"9bafb2b1-e8b0-458d-a6c9-e18b31b73c53\" (UID: \"9bafb2b1-e8b0-458d-a6c9-e18b31b73c53\") " Nov 28 11:29:05 crc kubenswrapper[4923]: I1128 11:29:05.204222 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9bafb2b1-e8b0-458d-a6c9-e18b31b73c53-run-httpd\") pod \"9bafb2b1-e8b0-458d-a6c9-e18b31b73c53\" (UID: \"9bafb2b1-e8b0-458d-a6c9-e18b31b73c53\") " Nov 28 11:29:05 crc kubenswrapper[4923]: I1128 11:29:05.204244 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9bafb2b1-e8b0-458d-a6c9-e18b31b73c53-config-data\") pod \"9bafb2b1-e8b0-458d-a6c9-e18b31b73c53\" (UID: \"9bafb2b1-e8b0-458d-a6c9-e18b31b73c53\") " Nov 28 11:29:05 crc kubenswrapper[4923]: I1128 11:29:05.204657 4923 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/216d256f-0479-4b10-bf92-f59aef3136bc-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 11:29:05 crc kubenswrapper[4923]: I1128 11:29:05.204674 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jlxr6\" (UniqueName: \"kubernetes.io/projected/216d256f-0479-4b10-bf92-f59aef3136bc-kube-api-access-jlxr6\") on node \"crc\" DevicePath \"\"" Nov 28 11:29:05 crc kubenswrapper[4923]: I1128 11:29:05.204685 4923 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/216d256f-0479-4b10-bf92-f59aef3136bc-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 28 11:29:05 crc kubenswrapper[4923]: I1128 11:29:05.204696 4923 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/216d256f-0479-4b10-bf92-f59aef3136bc-logs\") on node \"crc\" DevicePath \"\"" Nov 28 11:29:05 crc kubenswrapper[4923]: I1128 11:29:05.204703 4923 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/216d256f-0479-4b10-bf92-f59aef3136bc-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 11:29:05 crc kubenswrapper[4923]: I1128 11:29:05.205971 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9bafb2b1-e8b0-458d-a6c9-e18b31b73c53-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "9bafb2b1-e8b0-458d-a6c9-e18b31b73c53" (UID: "9bafb2b1-e8b0-458d-a6c9-e18b31b73c53"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:29:05 crc kubenswrapper[4923]: I1128 11:29:05.206741 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9bafb2b1-e8b0-458d-a6c9-e18b31b73c53-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "9bafb2b1-e8b0-458d-a6c9-e18b31b73c53" (UID: "9bafb2b1-e8b0-458d-a6c9-e18b31b73c53"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:29:05 crc kubenswrapper[4923]: I1128 11:29:05.208400 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9bafb2b1-e8b0-458d-a6c9-e18b31b73c53-scripts" (OuterVolumeSpecName: "scripts") pod "9bafb2b1-e8b0-458d-a6c9-e18b31b73c53" (UID: "9bafb2b1-e8b0-458d-a6c9-e18b31b73c53"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:29:05 crc kubenswrapper[4923]: I1128 11:29:05.217359 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9bafb2b1-e8b0-458d-a6c9-e18b31b73c53-kube-api-access-tbzlk" (OuterVolumeSpecName: "kube-api-access-tbzlk") pod "9bafb2b1-e8b0-458d-a6c9-e18b31b73c53" (UID: "9bafb2b1-e8b0-458d-a6c9-e18b31b73c53"). InnerVolumeSpecName "kube-api-access-tbzlk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:29:05 crc kubenswrapper[4923]: I1128 11:29:05.245078 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9bafb2b1-e8b0-458d-a6c9-e18b31b73c53-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "9bafb2b1-e8b0-458d-a6c9-e18b31b73c53" (UID: "9bafb2b1-e8b0-458d-a6c9-e18b31b73c53"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:29:05 crc kubenswrapper[4923]: I1128 11:29:05.275191 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9bafb2b1-e8b0-458d-a6c9-e18b31b73c53-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9bafb2b1-e8b0-458d-a6c9-e18b31b73c53" (UID: "9bafb2b1-e8b0-458d-a6c9-e18b31b73c53"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:29:05 crc kubenswrapper[4923]: I1128 11:29:05.279351 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9bafb2b1-e8b0-458d-a6c9-e18b31b73c53-config-data" (OuterVolumeSpecName: "config-data") pod "9bafb2b1-e8b0-458d-a6c9-e18b31b73c53" (UID: "9bafb2b1-e8b0-458d-a6c9-e18b31b73c53"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:29:05 crc kubenswrapper[4923]: I1128 11:29:05.306494 4923 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9bafb2b1-e8b0-458d-a6c9-e18b31b73c53-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 11:29:05 crc kubenswrapper[4923]: I1128 11:29:05.306545 4923 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9bafb2b1-e8b0-458d-a6c9-e18b31b73c53-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 11:29:05 crc kubenswrapper[4923]: I1128 11:29:05.306564 4923 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9bafb2b1-e8b0-458d-a6c9-e18b31b73c53-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 28 11:29:05 crc kubenswrapper[4923]: I1128 11:29:05.306582 4923 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9bafb2b1-e8b0-458d-a6c9-e18b31b73c53-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 11:29:05 crc kubenswrapper[4923]: I1128 11:29:05.306599 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tbzlk\" (UniqueName: \"kubernetes.io/projected/9bafb2b1-e8b0-458d-a6c9-e18b31b73c53-kube-api-access-tbzlk\") on node \"crc\" DevicePath \"\"" Nov 28 11:29:05 crc kubenswrapper[4923]: I1128 11:29:05.306615 4923 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9bafb2b1-e8b0-458d-a6c9-e18b31b73c53-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 11:29:05 crc kubenswrapper[4923]: I1128 11:29:05.306632 4923 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9bafb2b1-e8b0-458d-a6c9-e18b31b73c53-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 11:29:05 crc kubenswrapper[4923]: I1128 11:29:05.791863 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9bafb2b1-e8b0-458d-a6c9-e18b31b73c53","Type":"ContainerDied","Data":"feca1db351b861bc494151e862361e7d81763093f60c6eb7a95b13b547f521f5"} Nov 28 11:29:05 crc kubenswrapper[4923]: I1128 11:29:05.791922 4923 scope.go:117] "RemoveContainer" containerID="77adc24acbcb0229afdd02b080757f3a4dea527e5b845143c940753923dc966f" Nov 28 11:29:05 crc kubenswrapper[4923]: I1128 11:29:05.791945 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 11:29:05 crc kubenswrapper[4923]: I1128 11:29:05.794524 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-86dc447b-tv8qs" Nov 28 11:29:05 crc kubenswrapper[4923]: I1128 11:29:05.794547 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-86dc447b-tv8qs" event={"ID":"216d256f-0479-4b10-bf92-f59aef3136bc","Type":"ContainerDied","Data":"6a5a60f6a063d10036cb0a515ccd528686b2aa9ab2f427716f164cf121791545"} Nov 28 11:29:05 crc kubenswrapper[4923]: I1128 11:29:05.802547 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"dac93e05-77d1-4f77-8616-b47fee165189","Type":"ContainerStarted","Data":"e294ae829b6a2737da6bb4ca879e19c26a47639bf076019cfa0ad89e7ea64b7a"} Nov 28 11:29:05 crc kubenswrapper[4923]: I1128 11:29:05.832471 4923 scope.go:117] "RemoveContainer" containerID="2c204ba557e5abdb2905d400903dded7f672076feeb64a482b08343ea02647e9" Nov 28 11:29:05 crc kubenswrapper[4923]: I1128 11:29:05.882888 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-86dc447b-tv8qs"] Nov 28 11:29:05 crc kubenswrapper[4923]: I1128 11:29:05.885888 4923 scope.go:117] "RemoveContainer" containerID="1a8b5e6bab63dbf6cc77462ad4260a1dd4d3c3fb3142df51aac01e60157e1d7e" Nov 28 11:29:05 crc kubenswrapper[4923]: I1128 11:29:05.896718 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-86dc447b-tv8qs"] Nov 28 11:29:05 crc kubenswrapper[4923]: I1128 11:29:05.899906 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=3.106174076 podStartE2EDuration="14.89988798s" podCreationTimestamp="2025-11-28 11:28:51 +0000 UTC" firstStartedPulling="2025-11-28 11:28:52.962464468 +0000 UTC m=+1212.091148678" lastFinishedPulling="2025-11-28 11:29:04.756178372 +0000 UTC m=+1223.884862582" observedRunningTime="2025-11-28 11:29:05.832863453 +0000 UTC m=+1224.961547683" watchObservedRunningTime="2025-11-28 11:29:05.89988798 +0000 UTC m=+1225.028572190" Nov 28 11:29:05 crc kubenswrapper[4923]: I1128 11:29:05.915946 4923 scope.go:117] "RemoveContainer" containerID="88f0dac3bd9080ba61e7e1b7e5bf76c7b362aaedeb7246a4b2430b69948f1267" Nov 28 11:29:05 crc kubenswrapper[4923]: I1128 11:29:05.922646 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 11:29:05 crc kubenswrapper[4923]: I1128 11:29:05.931309 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 28 11:29:05 crc kubenswrapper[4923]: I1128 11:29:05.936860 4923 scope.go:117] "RemoveContainer" containerID="4ad8877683db2431c87adaea4885f969af2b0eeb49a3d655c2a2c8ffd67d706c" Nov 28 11:29:05 crc kubenswrapper[4923]: I1128 11:29:05.941038 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 28 11:29:05 crc kubenswrapper[4923]: E1128 11:29:05.941365 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="216d256f-0479-4b10-bf92-f59aef3136bc" containerName="barbican-api-log" Nov 28 11:29:05 crc kubenswrapper[4923]: I1128 11:29:05.941382 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="216d256f-0479-4b10-bf92-f59aef3136bc" containerName="barbican-api-log" Nov 28 11:29:05 crc kubenswrapper[4923]: E1128 11:29:05.941396 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9bafb2b1-e8b0-458d-a6c9-e18b31b73c53" containerName="proxy-httpd" Nov 28 11:29:05 crc kubenswrapper[4923]: I1128 11:29:05.941403 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="9bafb2b1-e8b0-458d-a6c9-e18b31b73c53" 
containerName="proxy-httpd" Nov 28 11:29:05 crc kubenswrapper[4923]: E1128 11:29:05.941416 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9bafb2b1-e8b0-458d-a6c9-e18b31b73c53" containerName="ceilometer-central-agent" Nov 28 11:29:05 crc kubenswrapper[4923]: I1128 11:29:05.941423 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="9bafb2b1-e8b0-458d-a6c9-e18b31b73c53" containerName="ceilometer-central-agent" Nov 28 11:29:05 crc kubenswrapper[4923]: E1128 11:29:05.941435 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9bafb2b1-e8b0-458d-a6c9-e18b31b73c53" containerName="sg-core" Nov 28 11:29:05 crc kubenswrapper[4923]: I1128 11:29:05.941441 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="9bafb2b1-e8b0-458d-a6c9-e18b31b73c53" containerName="sg-core" Nov 28 11:29:05 crc kubenswrapper[4923]: E1128 11:29:05.941452 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="216d256f-0479-4b10-bf92-f59aef3136bc" containerName="barbican-api" Nov 28 11:29:05 crc kubenswrapper[4923]: I1128 11:29:05.941460 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="216d256f-0479-4b10-bf92-f59aef3136bc" containerName="barbican-api" Nov 28 11:29:05 crc kubenswrapper[4923]: E1128 11:29:05.941471 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9bafb2b1-e8b0-458d-a6c9-e18b31b73c53" containerName="ceilometer-notification-agent" Nov 28 11:29:05 crc kubenswrapper[4923]: I1128 11:29:05.941476 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="9bafb2b1-e8b0-458d-a6c9-e18b31b73c53" containerName="ceilometer-notification-agent" Nov 28 11:29:05 crc kubenswrapper[4923]: I1128 11:29:05.941631 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="216d256f-0479-4b10-bf92-f59aef3136bc" containerName="barbican-api-log" Nov 28 11:29:05 crc kubenswrapper[4923]: I1128 11:29:05.941645 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="9bafb2b1-e8b0-458d-a6c9-e18b31b73c53" containerName="ceilometer-notification-agent" Nov 28 11:29:05 crc kubenswrapper[4923]: I1128 11:29:05.941657 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="216d256f-0479-4b10-bf92-f59aef3136bc" containerName="barbican-api" Nov 28 11:29:05 crc kubenswrapper[4923]: I1128 11:29:05.941665 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="9bafb2b1-e8b0-458d-a6c9-e18b31b73c53" containerName="proxy-httpd" Nov 28 11:29:05 crc kubenswrapper[4923]: I1128 11:29:05.941677 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="9bafb2b1-e8b0-458d-a6c9-e18b31b73c53" containerName="ceilometer-central-agent" Nov 28 11:29:05 crc kubenswrapper[4923]: I1128 11:29:05.941686 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="9bafb2b1-e8b0-458d-a6c9-e18b31b73c53" containerName="sg-core" Nov 28 11:29:05 crc kubenswrapper[4923]: I1128 11:29:05.953159 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 11:29:05 crc kubenswrapper[4923]: I1128 11:29:05.955062 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 28 11:29:05 crc kubenswrapper[4923]: I1128 11:29:05.955596 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 28 11:29:05 crc kubenswrapper[4923]: I1128 11:29:05.965487 4923 scope.go:117] "RemoveContainer" containerID="1182aaafabe027a8ccbdf4064f9d7a64ba5036316d416faf774b1d223fabd706" Nov 28 11:29:05 crc kubenswrapper[4923]: I1128 11:29:05.972390 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 11:29:06 crc kubenswrapper[4923]: I1128 11:29:06.119554 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a-run-httpd\") pod \"ceilometer-0\" (UID: \"e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a\") " pod="openstack/ceilometer-0" Nov 28 11:29:06 crc kubenswrapper[4923]: I1128 11:29:06.119610 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a-scripts\") pod \"ceilometer-0\" (UID: \"e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a\") " pod="openstack/ceilometer-0" Nov 28 11:29:06 crc kubenswrapper[4923]: I1128 11:29:06.119773 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a-config-data\") pod \"ceilometer-0\" (UID: \"e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a\") " pod="openstack/ceilometer-0" Nov 28 11:29:06 crc kubenswrapper[4923]: I1128 11:29:06.120021 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dwfxk\" (UniqueName: \"kubernetes.io/projected/e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a-kube-api-access-dwfxk\") pod \"ceilometer-0\" (UID: \"e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a\") " pod="openstack/ceilometer-0" Nov 28 11:29:06 crc kubenswrapper[4923]: I1128 11:29:06.120121 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a-log-httpd\") pod \"ceilometer-0\" (UID: \"e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a\") " pod="openstack/ceilometer-0" Nov 28 11:29:06 crc kubenswrapper[4923]: I1128 11:29:06.120214 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a\") " pod="openstack/ceilometer-0" Nov 28 11:29:06 crc kubenswrapper[4923]: I1128 11:29:06.120235 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a\") " pod="openstack/ceilometer-0" Nov 28 11:29:06 crc kubenswrapper[4923]: I1128 11:29:06.222135 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a\") " pod="openstack/ceilometer-0" Nov 28 11:29:06 crc kubenswrapper[4923]: I1128 11:29:06.222178 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a\") " pod="openstack/ceilometer-0" Nov 28 11:29:06 crc kubenswrapper[4923]: I1128 11:29:06.222235 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a-run-httpd\") pod \"ceilometer-0\" (UID: \"e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a\") " pod="openstack/ceilometer-0" Nov 28 11:29:06 crc kubenswrapper[4923]: I1128 11:29:06.222261 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a-scripts\") pod \"ceilometer-0\" (UID: \"e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a\") " pod="openstack/ceilometer-0" Nov 28 11:29:06 crc kubenswrapper[4923]: I1128 11:29:06.222284 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a-config-data\") pod \"ceilometer-0\" (UID: \"e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a\") " pod="openstack/ceilometer-0" Nov 28 11:29:06 crc kubenswrapper[4923]: I1128 11:29:06.222324 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dwfxk\" (UniqueName: \"kubernetes.io/projected/e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a-kube-api-access-dwfxk\") pod \"ceilometer-0\" (UID: \"e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a\") " pod="openstack/ceilometer-0" Nov 28 11:29:06 crc kubenswrapper[4923]: I1128 11:29:06.222352 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a-log-httpd\") pod \"ceilometer-0\" (UID: \"e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a\") " pod="openstack/ceilometer-0" Nov 28 11:29:06 crc kubenswrapper[4923]: I1128 11:29:06.223074 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a-log-httpd\") pod \"ceilometer-0\" (UID: \"e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a\") " pod="openstack/ceilometer-0" Nov 28 11:29:06 crc kubenswrapper[4923]: I1128 11:29:06.223418 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a-run-httpd\") pod \"ceilometer-0\" (UID: \"e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a\") " pod="openstack/ceilometer-0" Nov 28 11:29:06 crc kubenswrapper[4923]: I1128 11:29:06.227805 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a\") " pod="openstack/ceilometer-0" Nov 28 11:29:06 crc kubenswrapper[4923]: I1128 11:29:06.229188 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a-config-data\") pod \"ceilometer-0\" (UID: \"e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a\") " pod="openstack/ceilometer-0" Nov 28 11:29:06 crc kubenswrapper[4923]: I1128 11:29:06.235482 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a\") " pod="openstack/ceilometer-0" Nov 28 11:29:06 crc kubenswrapper[4923]: I1128 11:29:06.236218 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a-scripts\") pod \"ceilometer-0\" (UID: \"e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a\") " pod="openstack/ceilometer-0" Nov 28 11:29:06 crc kubenswrapper[4923]: I1128 11:29:06.249131 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dwfxk\" (UniqueName: \"kubernetes.io/projected/e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a-kube-api-access-dwfxk\") pod \"ceilometer-0\" (UID: \"e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a\") " pod="openstack/ceilometer-0" Nov 28 11:29:06 crc kubenswrapper[4923]: I1128 11:29:06.270306 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 11:29:06 crc kubenswrapper[4923]: I1128 11:29:06.752038 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 11:29:06 crc kubenswrapper[4923]: I1128 11:29:06.813843 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a","Type":"ContainerStarted","Data":"9d9a79cead10d3a1bc2f81b81b16944a46da32e1b69567248c0abc9afbba971c"} Nov 28 11:29:07 crc kubenswrapper[4923]: I1128 11:29:07.178160 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="216d256f-0479-4b10-bf92-f59aef3136bc" path="/var/lib/kubelet/pods/216d256f-0479-4b10-bf92-f59aef3136bc/volumes" Nov 28 11:29:07 crc kubenswrapper[4923]: I1128 11:29:07.178764 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9bafb2b1-e8b0-458d-a6c9-e18b31b73c53" path="/var/lib/kubelet/pods/9bafb2b1-e8b0-458d-a6c9-e18b31b73c53/volumes" Nov 28 11:29:07 crc kubenswrapper[4923]: I1128 11:29:07.824127 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a","Type":"ContainerStarted","Data":"0c722d6638d136eb15d171f6d65ae68bf692b17d307f01debcd1e30789700b4b"} Nov 28 11:29:08 crc kubenswrapper[4923]: I1128 11:29:08.833403 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a","Type":"ContainerStarted","Data":"0ff29eff39322e2879a42be9400a4942df844363e717f17fd4c3e31174fad29b"} Nov 28 11:29:08 crc kubenswrapper[4923]: I1128 11:29:08.833681 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a","Type":"ContainerStarted","Data":"d80eb0485a431a87c340b8f39684f0710b113a7630c24c41589bccd59dfbb409"} Nov 28 11:29:10 crc kubenswrapper[4923]: I1128 11:29:10.204324 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 11:29:10 crc kubenswrapper[4923]: I1128 11:29:10.850568 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a","Type":"ContainerStarted","Data":"c9acdd981396a93ce646904b1c785645996b9f448464aa0b0bd54bf6bbeaff3e"} Nov 28 11:29:10 crc kubenswrapper[4923]: I1128 11:29:10.851431 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 28 11:29:10 crc kubenswrapper[4923]: I1128 11:29:10.875040 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.960318881 podStartE2EDuration="5.875022278s" podCreationTimestamp="2025-11-28 11:29:05 +0000 UTC" firstStartedPulling="2025-11-28 11:29:06.774906276 +0000 UTC m=+1225.903590486" lastFinishedPulling="2025-11-28 11:29:09.689609673 +0000 UTC m=+1228.818293883" observedRunningTime="2025-11-28 11:29:10.868375152 +0000 UTC m=+1229.997059362" watchObservedRunningTime="2025-11-28 11:29:10.875022278 +0000 UTC m=+1230.003706488" Nov 28 11:29:10 crc kubenswrapper[4923]: E1128 11:29:10.907077 4923 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0577bf3a_49d8_4540_92f3_fa1703570c2d.slice/crio-conmon-1064fe83c605861b84c06036c63c49d98500fec804f45b7a55d9c8633a752af9.scope\": RecentStats: unable to find data in memory cache]" Nov 28 11:29:11 crc kubenswrapper[4923]: I1128 11:29:11.858805 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a" containerName="ceilometer-central-agent" containerID="cri-o://0c722d6638d136eb15d171f6d65ae68bf692b17d307f01debcd1e30789700b4b" gracePeriod=30 Nov 28 11:29:11 crc kubenswrapper[4923]: I1128 11:29:11.859101 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a" containerName="ceilometer-notification-agent" containerID="cri-o://d80eb0485a431a87c340b8f39684f0710b113a7630c24c41589bccd59dfbb409" gracePeriod=30 Nov 28 11:29:11 crc kubenswrapper[4923]: I1128 11:29:11.859195 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a" containerName="proxy-httpd" containerID="cri-o://c9acdd981396a93ce646904b1c785645996b9f448464aa0b0bd54bf6bbeaff3e" gracePeriod=30 Nov 28 11:29:11 crc kubenswrapper[4923]: I1128 11:29:11.859184 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a" containerName="sg-core" containerID="cri-o://0ff29eff39322e2879a42be9400a4942df844363e717f17fd4c3e31174fad29b" gracePeriod=30 Nov 28 11:29:12 crc kubenswrapper[4923]: I1128 11:29:12.660986 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 11:29:12 crc kubenswrapper[4923]: I1128 11:29:12.829924 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a-sg-core-conf-yaml\") pod \"e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a\" (UID: \"e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a\") " Nov 28 11:29:12 crc kubenswrapper[4923]: I1128 11:29:12.830007 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a-scripts\") pod \"e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a\" (UID: \"e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a\") " Nov 28 11:29:12 crc kubenswrapper[4923]: I1128 11:29:12.830072 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dwfxk\" (UniqueName: \"kubernetes.io/projected/e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a-kube-api-access-dwfxk\") pod \"e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a\" (UID: \"e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a\") " Nov 28 11:29:12 crc kubenswrapper[4923]: I1128 11:29:12.830179 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a-log-httpd\") pod \"e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a\" (UID: \"e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a\") " Nov 28 11:29:12 crc kubenswrapper[4923]: I1128 11:29:12.830277 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a-run-httpd\") pod \"e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a\" (UID: \"e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a\") " Nov 28 11:29:12 crc kubenswrapper[4923]: I1128 11:29:12.830304 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a-combined-ca-bundle\") pod \"e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a\" (UID: \"e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a\") " Nov 28 11:29:12 crc kubenswrapper[4923]: I1128 11:29:12.830385 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a-config-data\") pod \"e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a\" (UID: \"e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a\") " Nov 28 11:29:12 crc kubenswrapper[4923]: I1128 11:29:12.830622 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a" (UID: "e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:29:12 crc kubenswrapper[4923]: I1128 11:29:12.830751 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a" (UID: "e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:29:12 crc kubenswrapper[4923]: I1128 11:29:12.831139 4923 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 11:29:12 crc kubenswrapper[4923]: I1128 11:29:12.831167 4923 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 11:29:12 crc kubenswrapper[4923]: I1128 11:29:12.835695 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a-kube-api-access-dwfxk" (OuterVolumeSpecName: "kube-api-access-dwfxk") pod "e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a" (UID: "e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a"). InnerVolumeSpecName "kube-api-access-dwfxk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:29:12 crc kubenswrapper[4923]: I1128 11:29:12.836126 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a-scripts" (OuterVolumeSpecName: "scripts") pod "e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a" (UID: "e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:29:12 crc kubenswrapper[4923]: I1128 11:29:12.855702 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a" (UID: "e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:29:12 crc kubenswrapper[4923]: I1128 11:29:12.870186 4923 generic.go:334] "Generic (PLEG): container finished" podID="e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a" containerID="c9acdd981396a93ce646904b1c785645996b9f448464aa0b0bd54bf6bbeaff3e" exitCode=0 Nov 28 11:29:12 crc kubenswrapper[4923]: I1128 11:29:12.870230 4923 generic.go:334] "Generic (PLEG): container finished" podID="e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a" containerID="0ff29eff39322e2879a42be9400a4942df844363e717f17fd4c3e31174fad29b" exitCode=2 Nov 28 11:29:12 crc kubenswrapper[4923]: I1128 11:29:12.870240 4923 generic.go:334] "Generic (PLEG): container finished" podID="e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a" containerID="d80eb0485a431a87c340b8f39684f0710b113a7630c24c41589bccd59dfbb409" exitCode=0 Nov 28 11:29:12 crc kubenswrapper[4923]: I1128 11:29:12.870245 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 11:29:12 crc kubenswrapper[4923]: I1128 11:29:12.870249 4923 generic.go:334] "Generic (PLEG): container finished" podID="e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a" containerID="0c722d6638d136eb15d171f6d65ae68bf692b17d307f01debcd1e30789700b4b" exitCode=0 Nov 28 11:29:12 crc kubenswrapper[4923]: I1128 11:29:12.870277 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a","Type":"ContainerDied","Data":"c9acdd981396a93ce646904b1c785645996b9f448464aa0b0bd54bf6bbeaff3e"} Nov 28 11:29:12 crc kubenswrapper[4923]: I1128 11:29:12.870652 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a","Type":"ContainerDied","Data":"0ff29eff39322e2879a42be9400a4942df844363e717f17fd4c3e31174fad29b"} Nov 28 11:29:12 crc kubenswrapper[4923]: I1128 11:29:12.870669 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a","Type":"ContainerDied","Data":"d80eb0485a431a87c340b8f39684f0710b113a7630c24c41589bccd59dfbb409"} Nov 28 11:29:12 crc kubenswrapper[4923]: I1128 11:29:12.870681 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a","Type":"ContainerDied","Data":"0c722d6638d136eb15d171f6d65ae68bf692b17d307f01debcd1e30789700b4b"} Nov 28 11:29:12 crc kubenswrapper[4923]: I1128 11:29:12.870691 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a","Type":"ContainerDied","Data":"9d9a79cead10d3a1bc2f81b81b16944a46da32e1b69567248c0abc9afbba971c"} Nov 28 11:29:12 crc kubenswrapper[4923]: I1128 11:29:12.870720 4923 scope.go:117] "RemoveContainer" containerID="c9acdd981396a93ce646904b1c785645996b9f448464aa0b0bd54bf6bbeaff3e" Nov 28 11:29:12 crc kubenswrapper[4923]: I1128 11:29:12.899961 4923 scope.go:117] "RemoveContainer" containerID="0ff29eff39322e2879a42be9400a4942df844363e717f17fd4c3e31174fad29b" Nov 28 11:29:12 crc kubenswrapper[4923]: I1128 11:29:12.916511 4923 scope.go:117] "RemoveContainer" containerID="d80eb0485a431a87c340b8f39684f0710b113a7630c24c41589bccd59dfbb409" Nov 28 11:29:12 crc kubenswrapper[4923]: I1128 11:29:12.929198 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a" (UID: "e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:29:12 crc kubenswrapper[4923]: I1128 11:29:12.931728 4923 scope.go:117] "RemoveContainer" containerID="0c722d6638d136eb15d171f6d65ae68bf692b17d307f01debcd1e30789700b4b" Nov 28 11:29:12 crc kubenswrapper[4923]: I1128 11:29:12.933872 4923 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 11:29:12 crc kubenswrapper[4923]: I1128 11:29:12.933897 4923 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 28 11:29:12 crc kubenswrapper[4923]: I1128 11:29:12.933907 4923 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 11:29:12 crc kubenswrapper[4923]: I1128 11:29:12.933918 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dwfxk\" (UniqueName: \"kubernetes.io/projected/e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a-kube-api-access-dwfxk\") on node \"crc\" DevicePath \"\"" Nov 28 11:29:12 crc kubenswrapper[4923]: I1128 11:29:12.950532 4923 scope.go:117] "RemoveContainer" containerID="c9acdd981396a93ce646904b1c785645996b9f448464aa0b0bd54bf6bbeaff3e" Nov 28 11:29:12 crc kubenswrapper[4923]: E1128 11:29:12.951045 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c9acdd981396a93ce646904b1c785645996b9f448464aa0b0bd54bf6bbeaff3e\": container with ID starting with c9acdd981396a93ce646904b1c785645996b9f448464aa0b0bd54bf6bbeaff3e not found: ID does not exist" containerID="c9acdd981396a93ce646904b1c785645996b9f448464aa0b0bd54bf6bbeaff3e" Nov 28 11:29:12 crc kubenswrapper[4923]: I1128 11:29:12.951075 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c9acdd981396a93ce646904b1c785645996b9f448464aa0b0bd54bf6bbeaff3e"} err="failed to get container status \"c9acdd981396a93ce646904b1c785645996b9f448464aa0b0bd54bf6bbeaff3e\": rpc error: code = NotFound desc = could not find container \"c9acdd981396a93ce646904b1c785645996b9f448464aa0b0bd54bf6bbeaff3e\": container with ID starting with c9acdd981396a93ce646904b1c785645996b9f448464aa0b0bd54bf6bbeaff3e not found: ID does not exist" Nov 28 11:29:12 crc kubenswrapper[4923]: I1128 11:29:12.951095 4923 scope.go:117] "RemoveContainer" containerID="0ff29eff39322e2879a42be9400a4942df844363e717f17fd4c3e31174fad29b" Nov 28 11:29:12 crc kubenswrapper[4923]: E1128 11:29:12.951362 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0ff29eff39322e2879a42be9400a4942df844363e717f17fd4c3e31174fad29b\": container with ID starting with 0ff29eff39322e2879a42be9400a4942df844363e717f17fd4c3e31174fad29b not found: ID does not exist" containerID="0ff29eff39322e2879a42be9400a4942df844363e717f17fd4c3e31174fad29b" Nov 28 11:29:12 crc kubenswrapper[4923]: I1128 11:29:12.951381 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0ff29eff39322e2879a42be9400a4942df844363e717f17fd4c3e31174fad29b"} err="failed to get container status \"0ff29eff39322e2879a42be9400a4942df844363e717f17fd4c3e31174fad29b\": rpc error: code = NotFound 
desc = could not find container \"0ff29eff39322e2879a42be9400a4942df844363e717f17fd4c3e31174fad29b\": container with ID starting with 0ff29eff39322e2879a42be9400a4942df844363e717f17fd4c3e31174fad29b not found: ID does not exist" Nov 28 11:29:12 crc kubenswrapper[4923]: I1128 11:29:12.951393 4923 scope.go:117] "RemoveContainer" containerID="d80eb0485a431a87c340b8f39684f0710b113a7630c24c41589bccd59dfbb409" Nov 28 11:29:12 crc kubenswrapper[4923]: E1128 11:29:12.951733 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d80eb0485a431a87c340b8f39684f0710b113a7630c24c41589bccd59dfbb409\": container with ID starting with d80eb0485a431a87c340b8f39684f0710b113a7630c24c41589bccd59dfbb409 not found: ID does not exist" containerID="d80eb0485a431a87c340b8f39684f0710b113a7630c24c41589bccd59dfbb409" Nov 28 11:29:12 crc kubenswrapper[4923]: I1128 11:29:12.951752 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d80eb0485a431a87c340b8f39684f0710b113a7630c24c41589bccd59dfbb409"} err="failed to get container status \"d80eb0485a431a87c340b8f39684f0710b113a7630c24c41589bccd59dfbb409\": rpc error: code = NotFound desc = could not find container \"d80eb0485a431a87c340b8f39684f0710b113a7630c24c41589bccd59dfbb409\": container with ID starting with d80eb0485a431a87c340b8f39684f0710b113a7630c24c41589bccd59dfbb409 not found: ID does not exist" Nov 28 11:29:12 crc kubenswrapper[4923]: I1128 11:29:12.951764 4923 scope.go:117] "RemoveContainer" containerID="0c722d6638d136eb15d171f6d65ae68bf692b17d307f01debcd1e30789700b4b" Nov 28 11:29:12 crc kubenswrapper[4923]: E1128 11:29:12.952247 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0c722d6638d136eb15d171f6d65ae68bf692b17d307f01debcd1e30789700b4b\": container with ID starting with 0c722d6638d136eb15d171f6d65ae68bf692b17d307f01debcd1e30789700b4b not found: ID does not exist" containerID="0c722d6638d136eb15d171f6d65ae68bf692b17d307f01debcd1e30789700b4b" Nov 28 11:29:12 crc kubenswrapper[4923]: I1128 11:29:12.952268 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0c722d6638d136eb15d171f6d65ae68bf692b17d307f01debcd1e30789700b4b"} err="failed to get container status \"0c722d6638d136eb15d171f6d65ae68bf692b17d307f01debcd1e30789700b4b\": rpc error: code = NotFound desc = could not find container \"0c722d6638d136eb15d171f6d65ae68bf692b17d307f01debcd1e30789700b4b\": container with ID starting with 0c722d6638d136eb15d171f6d65ae68bf692b17d307f01debcd1e30789700b4b not found: ID does not exist" Nov 28 11:29:12 crc kubenswrapper[4923]: I1128 11:29:12.952282 4923 scope.go:117] "RemoveContainer" containerID="c9acdd981396a93ce646904b1c785645996b9f448464aa0b0bd54bf6bbeaff3e" Nov 28 11:29:12 crc kubenswrapper[4923]: I1128 11:29:12.952609 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c9acdd981396a93ce646904b1c785645996b9f448464aa0b0bd54bf6bbeaff3e"} err="failed to get container status \"c9acdd981396a93ce646904b1c785645996b9f448464aa0b0bd54bf6bbeaff3e\": rpc error: code = NotFound desc = could not find container \"c9acdd981396a93ce646904b1c785645996b9f448464aa0b0bd54bf6bbeaff3e\": container with ID starting with c9acdd981396a93ce646904b1c785645996b9f448464aa0b0bd54bf6bbeaff3e not found: ID does not exist" Nov 28 11:29:12 crc kubenswrapper[4923]: I1128 11:29:12.952627 4923 
scope.go:117] "RemoveContainer" containerID="0ff29eff39322e2879a42be9400a4942df844363e717f17fd4c3e31174fad29b" Nov 28 11:29:12 crc kubenswrapper[4923]: I1128 11:29:12.952973 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0ff29eff39322e2879a42be9400a4942df844363e717f17fd4c3e31174fad29b"} err="failed to get container status \"0ff29eff39322e2879a42be9400a4942df844363e717f17fd4c3e31174fad29b\": rpc error: code = NotFound desc = could not find container \"0ff29eff39322e2879a42be9400a4942df844363e717f17fd4c3e31174fad29b\": container with ID starting with 0ff29eff39322e2879a42be9400a4942df844363e717f17fd4c3e31174fad29b not found: ID does not exist" Nov 28 11:29:12 crc kubenswrapper[4923]: I1128 11:29:12.952995 4923 scope.go:117] "RemoveContainer" containerID="d80eb0485a431a87c340b8f39684f0710b113a7630c24c41589bccd59dfbb409" Nov 28 11:29:12 crc kubenswrapper[4923]: I1128 11:29:12.953209 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d80eb0485a431a87c340b8f39684f0710b113a7630c24c41589bccd59dfbb409"} err="failed to get container status \"d80eb0485a431a87c340b8f39684f0710b113a7630c24c41589bccd59dfbb409\": rpc error: code = NotFound desc = could not find container \"d80eb0485a431a87c340b8f39684f0710b113a7630c24c41589bccd59dfbb409\": container with ID starting with d80eb0485a431a87c340b8f39684f0710b113a7630c24c41589bccd59dfbb409 not found: ID does not exist" Nov 28 11:29:12 crc kubenswrapper[4923]: I1128 11:29:12.953227 4923 scope.go:117] "RemoveContainer" containerID="0c722d6638d136eb15d171f6d65ae68bf692b17d307f01debcd1e30789700b4b" Nov 28 11:29:12 crc kubenswrapper[4923]: I1128 11:29:12.953418 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0c722d6638d136eb15d171f6d65ae68bf692b17d307f01debcd1e30789700b4b"} err="failed to get container status \"0c722d6638d136eb15d171f6d65ae68bf692b17d307f01debcd1e30789700b4b\": rpc error: code = NotFound desc = could not find container \"0c722d6638d136eb15d171f6d65ae68bf692b17d307f01debcd1e30789700b4b\": container with ID starting with 0c722d6638d136eb15d171f6d65ae68bf692b17d307f01debcd1e30789700b4b not found: ID does not exist" Nov 28 11:29:12 crc kubenswrapper[4923]: I1128 11:29:12.953434 4923 scope.go:117] "RemoveContainer" containerID="c9acdd981396a93ce646904b1c785645996b9f448464aa0b0bd54bf6bbeaff3e" Nov 28 11:29:12 crc kubenswrapper[4923]: I1128 11:29:12.953716 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c9acdd981396a93ce646904b1c785645996b9f448464aa0b0bd54bf6bbeaff3e"} err="failed to get container status \"c9acdd981396a93ce646904b1c785645996b9f448464aa0b0bd54bf6bbeaff3e\": rpc error: code = NotFound desc = could not find container \"c9acdd981396a93ce646904b1c785645996b9f448464aa0b0bd54bf6bbeaff3e\": container with ID starting with c9acdd981396a93ce646904b1c785645996b9f448464aa0b0bd54bf6bbeaff3e not found: ID does not exist" Nov 28 11:29:12 crc kubenswrapper[4923]: I1128 11:29:12.953733 4923 scope.go:117] "RemoveContainer" containerID="0ff29eff39322e2879a42be9400a4942df844363e717f17fd4c3e31174fad29b" Nov 28 11:29:12 crc kubenswrapper[4923]: I1128 11:29:12.954006 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0ff29eff39322e2879a42be9400a4942df844363e717f17fd4c3e31174fad29b"} err="failed to get container status \"0ff29eff39322e2879a42be9400a4942df844363e717f17fd4c3e31174fad29b\": rpc error: code = 
NotFound desc = could not find container \"0ff29eff39322e2879a42be9400a4942df844363e717f17fd4c3e31174fad29b\": container with ID starting with 0ff29eff39322e2879a42be9400a4942df844363e717f17fd4c3e31174fad29b not found: ID does not exist" Nov 28 11:29:12 crc kubenswrapper[4923]: I1128 11:29:12.954025 4923 scope.go:117] "RemoveContainer" containerID="d80eb0485a431a87c340b8f39684f0710b113a7630c24c41589bccd59dfbb409" Nov 28 11:29:12 crc kubenswrapper[4923]: I1128 11:29:12.954251 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d80eb0485a431a87c340b8f39684f0710b113a7630c24c41589bccd59dfbb409"} err="failed to get container status \"d80eb0485a431a87c340b8f39684f0710b113a7630c24c41589bccd59dfbb409\": rpc error: code = NotFound desc = could not find container \"d80eb0485a431a87c340b8f39684f0710b113a7630c24c41589bccd59dfbb409\": container with ID starting with d80eb0485a431a87c340b8f39684f0710b113a7630c24c41589bccd59dfbb409 not found: ID does not exist" Nov 28 11:29:12 crc kubenswrapper[4923]: I1128 11:29:12.954270 4923 scope.go:117] "RemoveContainer" containerID="0c722d6638d136eb15d171f6d65ae68bf692b17d307f01debcd1e30789700b4b" Nov 28 11:29:12 crc kubenswrapper[4923]: I1128 11:29:12.955018 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0c722d6638d136eb15d171f6d65ae68bf692b17d307f01debcd1e30789700b4b"} err="failed to get container status \"0c722d6638d136eb15d171f6d65ae68bf692b17d307f01debcd1e30789700b4b\": rpc error: code = NotFound desc = could not find container \"0c722d6638d136eb15d171f6d65ae68bf692b17d307f01debcd1e30789700b4b\": container with ID starting with 0c722d6638d136eb15d171f6d65ae68bf692b17d307f01debcd1e30789700b4b not found: ID does not exist" Nov 28 11:29:12 crc kubenswrapper[4923]: I1128 11:29:12.955076 4923 scope.go:117] "RemoveContainer" containerID="c9acdd981396a93ce646904b1c785645996b9f448464aa0b0bd54bf6bbeaff3e" Nov 28 11:29:12 crc kubenswrapper[4923]: I1128 11:29:12.955395 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c9acdd981396a93ce646904b1c785645996b9f448464aa0b0bd54bf6bbeaff3e"} err="failed to get container status \"c9acdd981396a93ce646904b1c785645996b9f448464aa0b0bd54bf6bbeaff3e\": rpc error: code = NotFound desc = could not find container \"c9acdd981396a93ce646904b1c785645996b9f448464aa0b0bd54bf6bbeaff3e\": container with ID starting with c9acdd981396a93ce646904b1c785645996b9f448464aa0b0bd54bf6bbeaff3e not found: ID does not exist" Nov 28 11:29:12 crc kubenswrapper[4923]: I1128 11:29:12.955421 4923 scope.go:117] "RemoveContainer" containerID="0ff29eff39322e2879a42be9400a4942df844363e717f17fd4c3e31174fad29b" Nov 28 11:29:12 crc kubenswrapper[4923]: I1128 11:29:12.955787 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0ff29eff39322e2879a42be9400a4942df844363e717f17fd4c3e31174fad29b"} err="failed to get container status \"0ff29eff39322e2879a42be9400a4942df844363e717f17fd4c3e31174fad29b\": rpc error: code = NotFound desc = could not find container \"0ff29eff39322e2879a42be9400a4942df844363e717f17fd4c3e31174fad29b\": container with ID starting with 0ff29eff39322e2879a42be9400a4942df844363e717f17fd4c3e31174fad29b not found: ID does not exist" Nov 28 11:29:12 crc kubenswrapper[4923]: I1128 11:29:12.955828 4923 scope.go:117] "RemoveContainer" containerID="d80eb0485a431a87c340b8f39684f0710b113a7630c24c41589bccd59dfbb409" Nov 28 11:29:12 crc kubenswrapper[4923]: I1128 
11:29:12.956243 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d80eb0485a431a87c340b8f39684f0710b113a7630c24c41589bccd59dfbb409"} err="failed to get container status \"d80eb0485a431a87c340b8f39684f0710b113a7630c24c41589bccd59dfbb409\": rpc error: code = NotFound desc = could not find container \"d80eb0485a431a87c340b8f39684f0710b113a7630c24c41589bccd59dfbb409\": container with ID starting with d80eb0485a431a87c340b8f39684f0710b113a7630c24c41589bccd59dfbb409 not found: ID does not exist" Nov 28 11:29:12 crc kubenswrapper[4923]: I1128 11:29:12.956266 4923 scope.go:117] "RemoveContainer" containerID="0c722d6638d136eb15d171f6d65ae68bf692b17d307f01debcd1e30789700b4b" Nov 28 11:29:12 crc kubenswrapper[4923]: I1128 11:29:12.956559 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0c722d6638d136eb15d171f6d65ae68bf692b17d307f01debcd1e30789700b4b"} err="failed to get container status \"0c722d6638d136eb15d171f6d65ae68bf692b17d307f01debcd1e30789700b4b\": rpc error: code = NotFound desc = could not find container \"0c722d6638d136eb15d171f6d65ae68bf692b17d307f01debcd1e30789700b4b\": container with ID starting with 0c722d6638d136eb15d171f6d65ae68bf692b17d307f01debcd1e30789700b4b not found: ID does not exist" Nov 28 11:29:12 crc kubenswrapper[4923]: I1128 11:29:12.963753 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a-config-data" (OuterVolumeSpecName: "config-data") pod "e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a" (UID: "e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.035846 4923 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.202463 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.211298 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.229246 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-s4rht"] Nov 28 11:29:13 crc kubenswrapper[4923]: E1128 11:29:13.229532 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a" containerName="ceilometer-central-agent" Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.229549 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a" containerName="ceilometer-central-agent" Nov 28 11:29:13 crc kubenswrapper[4923]: E1128 11:29:13.229568 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a" containerName="sg-core" Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.229574 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a" containerName="sg-core" Nov 28 11:29:13 crc kubenswrapper[4923]: E1128 11:29:13.229590 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a" containerName="ceilometer-notification-agent" Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.229596 4923 
state_mem.go:107] "Deleted CPUSet assignment" podUID="e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a" containerName="ceilometer-notification-agent" Nov 28 11:29:13 crc kubenswrapper[4923]: E1128 11:29:13.229615 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a" containerName="proxy-httpd" Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.229621 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a" containerName="proxy-httpd" Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.229756 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a" containerName="ceilometer-central-agent" Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.229766 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a" containerName="sg-core" Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.229778 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a" containerName="ceilometer-notification-agent" Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.229787 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a" containerName="proxy-httpd" Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.230298 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-s4rht" Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.249278 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-s4rht"] Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.254608 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.262078 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.264193 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.266431 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.287505 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.341780 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xc7pq\" (UniqueName: \"kubernetes.io/projected/0884198d-4d47-4e53-8bb6-ea2e8365cadd-kube-api-access-xc7pq\") pod \"nova-api-db-create-s4rht\" (UID: \"0884198d-4d47-4e53-8bb6-ea2e8365cadd\") " pod="openstack/nova-api-db-create-s4rht" Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.341834 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0884198d-4d47-4e53-8bb6-ea2e8365cadd-operator-scripts\") pod \"nova-api-db-create-s4rht\" (UID: \"0884198d-4d47-4e53-8bb6-ea2e8365cadd\") " pod="openstack/nova-api-db-create-s4rht" Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.345321 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-v9nls"] Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.346708 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-v9nls" Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.359234 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-v9nls"] Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.377857 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-e361-account-create-update-g2428"] Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.379271 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-e361-account-create-update-g2428" Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.382043 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.400406 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-e361-account-create-update-g2428"] Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.439445 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-nfvs8"] Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.440410 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-nfvs8" Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.442861 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-snmkg\" (UniqueName: \"kubernetes.io/projected/b6609a58-f8a7-4e87-8f95-c144d662c613-kube-api-access-snmkg\") pod \"ceilometer-0\" (UID: \"b6609a58-f8a7-4e87-8f95-c144d662c613\") " pod="openstack/ceilometer-0" Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.442911 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b6609a58-f8a7-4e87-8f95-c144d662c613-config-data\") pod \"ceilometer-0\" (UID: \"b6609a58-f8a7-4e87-8f95-c144d662c613\") " pod="openstack/ceilometer-0" Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.442947 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b6609a58-f8a7-4e87-8f95-c144d662c613-log-httpd\") pod \"ceilometer-0\" (UID: \"b6609a58-f8a7-4e87-8f95-c144d662c613\") " pod="openstack/ceilometer-0" Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.443001 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b6609a58-f8a7-4e87-8f95-c144d662c613-scripts\") pod \"ceilometer-0\" (UID: \"b6609a58-f8a7-4e87-8f95-c144d662c613\") " pod="openstack/ceilometer-0" Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.443047 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d9616fc3-155f-4546-a191-2bd6337a71a7-operator-scripts\") pod \"nova-cell0-db-create-v9nls\" (UID: \"d9616fc3-155f-4546-a191-2bd6337a71a7\") " pod="openstack/nova-cell0-db-create-v9nls" Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.443070 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6609a58-f8a7-4e87-8f95-c144d662c613-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b6609a58-f8a7-4e87-8f95-c144d662c613\") " pod="openstack/ceilometer-0" Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.443088 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xc7pq\" (UniqueName: \"kubernetes.io/projected/0884198d-4d47-4e53-8bb6-ea2e8365cadd-kube-api-access-xc7pq\") pod \"nova-api-db-create-s4rht\" (UID: \"0884198d-4d47-4e53-8bb6-ea2e8365cadd\") " pod="openstack/nova-api-db-create-s4rht" Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.443111 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b6609a58-f8a7-4e87-8f95-c144d662c613-run-httpd\") pod \"ceilometer-0\" (UID: \"b6609a58-f8a7-4e87-8f95-c144d662c613\") " pod="openstack/ceilometer-0" Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.443129 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0884198d-4d47-4e53-8bb6-ea2e8365cadd-operator-scripts\") pod \"nova-api-db-create-s4rht\" (UID: \"0884198d-4d47-4e53-8bb6-ea2e8365cadd\") " pod="openstack/nova-api-db-create-s4rht" Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.443147 4923 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fwjhk\" (UniqueName: \"kubernetes.io/projected/d9616fc3-155f-4546-a191-2bd6337a71a7-kube-api-access-fwjhk\") pod \"nova-cell0-db-create-v9nls\" (UID: \"d9616fc3-155f-4546-a191-2bd6337a71a7\") " pod="openstack/nova-cell0-db-create-v9nls" Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.443185 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b6609a58-f8a7-4e87-8f95-c144d662c613-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b6609a58-f8a7-4e87-8f95-c144d662c613\") " pod="openstack/ceilometer-0" Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.445583 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0884198d-4d47-4e53-8bb6-ea2e8365cadd-operator-scripts\") pod \"nova-api-db-create-s4rht\" (UID: \"0884198d-4d47-4e53-8bb6-ea2e8365cadd\") " pod="openstack/nova-api-db-create-s4rht" Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.455401 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-nfvs8"] Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.480590 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xc7pq\" (UniqueName: \"kubernetes.io/projected/0884198d-4d47-4e53-8bb6-ea2e8365cadd-kube-api-access-xc7pq\") pod \"nova-api-db-create-s4rht\" (UID: \"0884198d-4d47-4e53-8bb6-ea2e8365cadd\") " pod="openstack/nova-api-db-create-s4rht" Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.530595 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-eaa6-account-create-update-q5lzr"] Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.531536 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-eaa6-account-create-update-q5lzr" Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.533114 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret" Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.541592 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-eaa6-account-create-update-q5lzr"] Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.547616 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b6609a58-f8a7-4e87-8f95-c144d662c613-run-httpd\") pod \"ceilometer-0\" (UID: \"b6609a58-f8a7-4e87-8f95-c144d662c613\") " pod="openstack/ceilometer-0" Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.547742 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fwjhk\" (UniqueName: \"kubernetes.io/projected/d9616fc3-155f-4546-a191-2bd6337a71a7-kube-api-access-fwjhk\") pod \"nova-cell0-db-create-v9nls\" (UID: \"d9616fc3-155f-4546-a191-2bd6337a71a7\") " pod="openstack/nova-cell0-db-create-v9nls" Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.547834 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b6609a58-f8a7-4e87-8f95-c144d662c613-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b6609a58-f8a7-4e87-8f95-c144d662c613\") " pod="openstack/ceilometer-0" Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.547910 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xltgb\" (UniqueName: \"kubernetes.io/projected/8251c07b-8159-40bd-8f32-51d8b0c4568a-kube-api-access-xltgb\") pod \"nova-api-e361-account-create-update-g2428\" (UID: \"8251c07b-8159-40bd-8f32-51d8b0c4568a\") " pod="openstack/nova-api-e361-account-create-update-g2428" Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.548025 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-snmkg\" (UniqueName: \"kubernetes.io/projected/b6609a58-f8a7-4e87-8f95-c144d662c613-kube-api-access-snmkg\") pod \"ceilometer-0\" (UID: \"b6609a58-f8a7-4e87-8f95-c144d662c613\") " pod="openstack/ceilometer-0" Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.548096 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hpn22\" (UniqueName: \"kubernetes.io/projected/28e2a9c4-1fc5-42d5-9afe-63d569d58db4-kube-api-access-hpn22\") pod \"nova-cell1-db-create-nfvs8\" (UID: \"28e2a9c4-1fc5-42d5-9afe-63d569d58db4\") " pod="openstack/nova-cell1-db-create-nfvs8" Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.548169 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/28e2a9c4-1fc5-42d5-9afe-63d569d58db4-operator-scripts\") pod \"nova-cell1-db-create-nfvs8\" (UID: \"28e2a9c4-1fc5-42d5-9afe-63d569d58db4\") " pod="openstack/nova-cell1-db-create-nfvs8" Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.548241 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b6609a58-f8a7-4e87-8f95-c144d662c613-config-data\") pod \"ceilometer-0\" (UID: \"b6609a58-f8a7-4e87-8f95-c144d662c613\") " pod="openstack/ceilometer-0" Nov 28 11:29:13 crc 
kubenswrapper[4923]: I1128 11:29:13.548313 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b6609a58-f8a7-4e87-8f95-c144d662c613-log-httpd\") pod \"ceilometer-0\" (UID: \"b6609a58-f8a7-4e87-8f95-c144d662c613\") " pod="openstack/ceilometer-0" Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.548406 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8251c07b-8159-40bd-8f32-51d8b0c4568a-operator-scripts\") pod \"nova-api-e361-account-create-update-g2428\" (UID: \"8251c07b-8159-40bd-8f32-51d8b0c4568a\") " pod="openstack/nova-api-e361-account-create-update-g2428" Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.548480 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b6609a58-f8a7-4e87-8f95-c144d662c613-scripts\") pod \"ceilometer-0\" (UID: \"b6609a58-f8a7-4e87-8f95-c144d662c613\") " pod="openstack/ceilometer-0" Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.548572 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d9616fc3-155f-4546-a191-2bd6337a71a7-operator-scripts\") pod \"nova-cell0-db-create-v9nls\" (UID: \"d9616fc3-155f-4546-a191-2bd6337a71a7\") " pod="openstack/nova-cell0-db-create-v9nls" Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.548635 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6609a58-f8a7-4e87-8f95-c144d662c613-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b6609a58-f8a7-4e87-8f95-c144d662c613\") " pod="openstack/ceilometer-0" Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.549236 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-s4rht" Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.549869 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b6609a58-f8a7-4e87-8f95-c144d662c613-run-httpd\") pod \"ceilometer-0\" (UID: \"b6609a58-f8a7-4e87-8f95-c144d662c613\") " pod="openstack/ceilometer-0" Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.550843 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d9616fc3-155f-4546-a191-2bd6337a71a7-operator-scripts\") pod \"nova-cell0-db-create-v9nls\" (UID: \"d9616fc3-155f-4546-a191-2bd6337a71a7\") " pod="openstack/nova-cell0-db-create-v9nls" Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.550869 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b6609a58-f8a7-4e87-8f95-c144d662c613-log-httpd\") pod \"ceilometer-0\" (UID: \"b6609a58-f8a7-4e87-8f95-c144d662c613\") " pod="openstack/ceilometer-0" Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.553909 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b6609a58-f8a7-4e87-8f95-c144d662c613-scripts\") pod \"ceilometer-0\" (UID: \"b6609a58-f8a7-4e87-8f95-c144d662c613\") " pod="openstack/ceilometer-0" Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.561592 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b6609a58-f8a7-4e87-8f95-c144d662c613-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b6609a58-f8a7-4e87-8f95-c144d662c613\") " pod="openstack/ceilometer-0" Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.572487 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6609a58-f8a7-4e87-8f95-c144d662c613-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b6609a58-f8a7-4e87-8f95-c144d662c613\") " pod="openstack/ceilometer-0" Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.574440 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b6609a58-f8a7-4e87-8f95-c144d662c613-config-data\") pod \"ceilometer-0\" (UID: \"b6609a58-f8a7-4e87-8f95-c144d662c613\") " pod="openstack/ceilometer-0" Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.575843 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fwjhk\" (UniqueName: \"kubernetes.io/projected/d9616fc3-155f-4546-a191-2bd6337a71a7-kube-api-access-fwjhk\") pod \"nova-cell0-db-create-v9nls\" (UID: \"d9616fc3-155f-4546-a191-2bd6337a71a7\") " pod="openstack/nova-cell0-db-create-v9nls" Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.576305 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-snmkg\" (UniqueName: \"kubernetes.io/projected/b6609a58-f8a7-4e87-8f95-c144d662c613-kube-api-access-snmkg\") pod \"ceilometer-0\" (UID: \"b6609a58-f8a7-4e87-8f95-c144d662c613\") " pod="openstack/ceilometer-0" Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.585144 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.651304 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xltgb\" (UniqueName: \"kubernetes.io/projected/8251c07b-8159-40bd-8f32-51d8b0c4568a-kube-api-access-xltgb\") pod \"nova-api-e361-account-create-update-g2428\" (UID: \"8251c07b-8159-40bd-8f32-51d8b0c4568a\") " pod="openstack/nova-api-e361-account-create-update-g2428" Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.651343 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hpn22\" (UniqueName: \"kubernetes.io/projected/28e2a9c4-1fc5-42d5-9afe-63d569d58db4-kube-api-access-hpn22\") pod \"nova-cell1-db-create-nfvs8\" (UID: \"28e2a9c4-1fc5-42d5-9afe-63d569d58db4\") " pod="openstack/nova-cell1-db-create-nfvs8" Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.651365 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/28e2a9c4-1fc5-42d5-9afe-63d569d58db4-operator-scripts\") pod \"nova-cell1-db-create-nfvs8\" (UID: \"28e2a9c4-1fc5-42d5-9afe-63d569d58db4\") " pod="openstack/nova-cell1-db-create-nfvs8" Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.651422 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e3d9b34a-edae-4f44-b4e1-78b3ece44177-operator-scripts\") pod \"nova-cell0-eaa6-account-create-update-q5lzr\" (UID: \"e3d9b34a-edae-4f44-b4e1-78b3ece44177\") " pod="openstack/nova-cell0-eaa6-account-create-update-q5lzr" Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.651452 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8251c07b-8159-40bd-8f32-51d8b0c4568a-operator-scripts\") pod \"nova-api-e361-account-create-update-g2428\" (UID: \"8251c07b-8159-40bd-8f32-51d8b0c4568a\") " pod="openstack/nova-api-e361-account-create-update-g2428" Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.651501 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6sj28\" (UniqueName: \"kubernetes.io/projected/e3d9b34a-edae-4f44-b4e1-78b3ece44177-kube-api-access-6sj28\") pod \"nova-cell0-eaa6-account-create-update-q5lzr\" (UID: \"e3d9b34a-edae-4f44-b4e1-78b3ece44177\") " pod="openstack/nova-cell0-eaa6-account-create-update-q5lzr" Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.652220 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/28e2a9c4-1fc5-42d5-9afe-63d569d58db4-operator-scripts\") pod \"nova-cell1-db-create-nfvs8\" (UID: \"28e2a9c4-1fc5-42d5-9afe-63d569d58db4\") " pod="openstack/nova-cell1-db-create-nfvs8" Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.652305 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8251c07b-8159-40bd-8f32-51d8b0c4568a-operator-scripts\") pod \"nova-api-e361-account-create-update-g2428\" (UID: \"8251c07b-8159-40bd-8f32-51d8b0c4568a\") " pod="openstack/nova-api-e361-account-create-update-g2428" Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.662404 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-v9nls" Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.667699 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xltgb\" (UniqueName: \"kubernetes.io/projected/8251c07b-8159-40bd-8f32-51d8b0c4568a-kube-api-access-xltgb\") pod \"nova-api-e361-account-create-update-g2428\" (UID: \"8251c07b-8159-40bd-8f32-51d8b0c4568a\") " pod="openstack/nova-api-e361-account-create-update-g2428" Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.670774 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hpn22\" (UniqueName: \"kubernetes.io/projected/28e2a9c4-1fc5-42d5-9afe-63d569d58db4-kube-api-access-hpn22\") pod \"nova-cell1-db-create-nfvs8\" (UID: \"28e2a9c4-1fc5-42d5-9afe-63d569d58db4\") " pod="openstack/nova-cell1-db-create-nfvs8" Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.698199 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-e361-account-create-update-g2428" Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.742852 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-c848-account-create-update-cwvm5"] Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.744820 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-c848-account-create-update-cwvm5" Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.748127 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret" Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.752963 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e3d9b34a-edae-4f44-b4e1-78b3ece44177-operator-scripts\") pod \"nova-cell0-eaa6-account-create-update-q5lzr\" (UID: \"e3d9b34a-edae-4f44-b4e1-78b3ece44177\") " pod="openstack/nova-cell0-eaa6-account-create-update-q5lzr" Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.753035 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6sj28\" (UniqueName: \"kubernetes.io/projected/e3d9b34a-edae-4f44-b4e1-78b3ece44177-kube-api-access-6sj28\") pod \"nova-cell0-eaa6-account-create-update-q5lzr\" (UID: \"e3d9b34a-edae-4f44-b4e1-78b3ece44177\") " pod="openstack/nova-cell0-eaa6-account-create-update-q5lzr" Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.755151 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-c848-account-create-update-cwvm5"] Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.761222 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-nfvs8" Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.770619 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e3d9b34a-edae-4f44-b4e1-78b3ece44177-operator-scripts\") pod \"nova-cell0-eaa6-account-create-update-q5lzr\" (UID: \"e3d9b34a-edae-4f44-b4e1-78b3ece44177\") " pod="openstack/nova-cell0-eaa6-account-create-update-q5lzr" Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.776563 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6sj28\" (UniqueName: \"kubernetes.io/projected/e3d9b34a-edae-4f44-b4e1-78b3ece44177-kube-api-access-6sj28\") pod \"nova-cell0-eaa6-account-create-update-q5lzr\" (UID: \"e3d9b34a-edae-4f44-b4e1-78b3ece44177\") " pod="openstack/nova-cell0-eaa6-account-create-update-q5lzr" Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.848413 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-eaa6-account-create-update-q5lzr" Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.873175 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kqfz5\" (UniqueName: \"kubernetes.io/projected/a6e7f7d2-60c3-4bf4-925e-06b4f92b333d-kube-api-access-kqfz5\") pod \"nova-cell1-c848-account-create-update-cwvm5\" (UID: \"a6e7f7d2-60c3-4bf4-925e-06b4f92b333d\") " pod="openstack/nova-cell1-c848-account-create-update-cwvm5" Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.873252 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a6e7f7d2-60c3-4bf4-925e-06b4f92b333d-operator-scripts\") pod \"nova-cell1-c848-account-create-update-cwvm5\" (UID: \"a6e7f7d2-60c3-4bf4-925e-06b4f92b333d\") " pod="openstack/nova-cell1-c848-account-create-update-cwvm5" Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.975134 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kqfz5\" (UniqueName: \"kubernetes.io/projected/a6e7f7d2-60c3-4bf4-925e-06b4f92b333d-kube-api-access-kqfz5\") pod \"nova-cell1-c848-account-create-update-cwvm5\" (UID: \"a6e7f7d2-60c3-4bf4-925e-06b4f92b333d\") " pod="openstack/nova-cell1-c848-account-create-update-cwvm5" Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.975518 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a6e7f7d2-60c3-4bf4-925e-06b4f92b333d-operator-scripts\") pod \"nova-cell1-c848-account-create-update-cwvm5\" (UID: \"a6e7f7d2-60c3-4bf4-925e-06b4f92b333d\") " pod="openstack/nova-cell1-c848-account-create-update-cwvm5" Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.976174 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a6e7f7d2-60c3-4bf4-925e-06b4f92b333d-operator-scripts\") pod \"nova-cell1-c848-account-create-update-cwvm5\" (UID: \"a6e7f7d2-60c3-4bf4-925e-06b4f92b333d\") " pod="openstack/nova-cell1-c848-account-create-update-cwvm5" Nov 28 11:29:13 crc kubenswrapper[4923]: I1128 11:29:13.992496 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kqfz5\" (UniqueName: \"kubernetes.io/projected/a6e7f7d2-60c3-4bf4-925e-06b4f92b333d-kube-api-access-kqfz5\") pod 
\"nova-cell1-c848-account-create-update-cwvm5\" (UID: \"a6e7f7d2-60c3-4bf4-925e-06b4f92b333d\") " pod="openstack/nova-cell1-c848-account-create-update-cwvm5" Nov 28 11:29:14 crc kubenswrapper[4923]: I1128 11:29:14.079555 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-c848-account-create-update-cwvm5" Nov 28 11:29:14 crc kubenswrapper[4923]: I1128 11:29:14.112513 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-s4rht"] Nov 28 11:29:14 crc kubenswrapper[4923]: W1128 11:29:14.137046 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0884198d_4d47_4e53_8bb6_ea2e8365cadd.slice/crio-61295a08eb903c2cc24d7a1b6708a6ed3024ab2223207ba1fb2987ab0e072114 WatchSource:0}: Error finding container 61295a08eb903c2cc24d7a1b6708a6ed3024ab2223207ba1fb2987ab0e072114: Status 404 returned error can't find the container with id 61295a08eb903c2cc24d7a1b6708a6ed3024ab2223207ba1fb2987ab0e072114 Nov 28 11:29:14 crc kubenswrapper[4923]: I1128 11:29:14.215301 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 11:29:14 crc kubenswrapper[4923]: I1128 11:29:14.279545 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-v9nls"] Nov 28 11:29:14 crc kubenswrapper[4923]: I1128 11:29:14.423746 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-e361-account-create-update-g2428"] Nov 28 11:29:14 crc kubenswrapper[4923]: I1128 11:29:14.434741 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-nfvs8"] Nov 28 11:29:14 crc kubenswrapper[4923]: W1128 11:29:14.435663 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod28e2a9c4_1fc5_42d5_9afe_63d569d58db4.slice/crio-45ef44b22ab3b44706973431154dc250ca66e89758df2e0347249592ae55f387 WatchSource:0}: Error finding container 45ef44b22ab3b44706973431154dc250ca66e89758df2e0347249592ae55f387: Status 404 returned error can't find the container with id 45ef44b22ab3b44706973431154dc250ca66e89758df2e0347249592ae55f387 Nov 28 11:29:14 crc kubenswrapper[4923]: I1128 11:29:14.630687 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-eaa6-account-create-update-q5lzr"] Nov 28 11:29:14 crc kubenswrapper[4923]: I1128 11:29:14.665293 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-c848-account-create-update-cwvm5"] Nov 28 11:29:14 crc kubenswrapper[4923]: W1128 11:29:14.680043 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda6e7f7d2_60c3_4bf4_925e_06b4f92b333d.slice/crio-82476167b0bdccc5ada8d91eb15d8e43d336c2132594989903db8d6d5b303c15 WatchSource:0}: Error finding container 82476167b0bdccc5ada8d91eb15d8e43d336c2132594989903db8d6d5b303c15: Status 404 returned error can't find the container with id 82476167b0bdccc5ada8d91eb15d8e43d336c2132594989903db8d6d5b303c15 Nov 28 11:29:14 crc kubenswrapper[4923]: I1128 11:29:14.969714 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-eaa6-account-create-update-q5lzr" event={"ID":"e3d9b34a-edae-4f44-b4e1-78b3ece44177","Type":"ContainerStarted","Data":"a3b5459607c6561ea92074c815ab5308bd2847c59c84bff0964a44dcd9d77c33"} Nov 28 11:29:14 crc kubenswrapper[4923]: I1128 11:29:14.969924 4923 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-eaa6-account-create-update-q5lzr" event={"ID":"e3d9b34a-edae-4f44-b4e1-78b3ece44177","Type":"ContainerStarted","Data":"e711772b2b1ba383952f3f5efc540a06922c3d8371b118a1300d29bfa9ab4272"} Nov 28 11:29:14 crc kubenswrapper[4923]: I1128 11:29:14.972432 4923 generic.go:334] "Generic (PLEG): container finished" podID="28e2a9c4-1fc5-42d5-9afe-63d569d58db4" containerID="b6d0d500917d58745e9dd95496c94c108616618eda406f6118dedf1b5123f2d0" exitCode=0 Nov 28 11:29:14 crc kubenswrapper[4923]: I1128 11:29:14.972492 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-nfvs8" event={"ID":"28e2a9c4-1fc5-42d5-9afe-63d569d58db4","Type":"ContainerDied","Data":"b6d0d500917d58745e9dd95496c94c108616618eda406f6118dedf1b5123f2d0"} Nov 28 11:29:14 crc kubenswrapper[4923]: I1128 11:29:14.972507 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-nfvs8" event={"ID":"28e2a9c4-1fc5-42d5-9afe-63d569d58db4","Type":"ContainerStarted","Data":"45ef44b22ab3b44706973431154dc250ca66e89758df2e0347249592ae55f387"} Nov 28 11:29:14 crc kubenswrapper[4923]: I1128 11:29:14.976096 4923 generic.go:334] "Generic (PLEG): container finished" podID="0884198d-4d47-4e53-8bb6-ea2e8365cadd" containerID="a6a46703e68f3f61cb5f0f71bbc5d80e8dc42858564ac4249f5b7327c8b93fa5" exitCode=0 Nov 28 11:29:14 crc kubenswrapper[4923]: I1128 11:29:14.976141 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-s4rht" event={"ID":"0884198d-4d47-4e53-8bb6-ea2e8365cadd","Type":"ContainerDied","Data":"a6a46703e68f3f61cb5f0f71bbc5d80e8dc42858564ac4249f5b7327c8b93fa5"} Nov 28 11:29:14 crc kubenswrapper[4923]: I1128 11:29:14.976158 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-s4rht" event={"ID":"0884198d-4d47-4e53-8bb6-ea2e8365cadd","Type":"ContainerStarted","Data":"61295a08eb903c2cc24d7a1b6708a6ed3024ab2223207ba1fb2987ab0e072114"} Nov 28 11:29:14 crc kubenswrapper[4923]: I1128 11:29:14.979019 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-c848-account-create-update-cwvm5" event={"ID":"a6e7f7d2-60c3-4bf4-925e-06b4f92b333d","Type":"ContainerStarted","Data":"f32f3db62c773c399eff8e87a70356fd322b810129d7b02ef8f4f1ae002cab61"} Nov 28 11:29:14 crc kubenswrapper[4923]: I1128 11:29:14.979063 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-c848-account-create-update-cwvm5" event={"ID":"a6e7f7d2-60c3-4bf4-925e-06b4f92b333d","Type":"ContainerStarted","Data":"82476167b0bdccc5ada8d91eb15d8e43d336c2132594989903db8d6d5b303c15"} Nov 28 11:29:14 crc kubenswrapper[4923]: I1128 11:29:14.984448 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b6609a58-f8a7-4e87-8f95-c144d662c613","Type":"ContainerStarted","Data":"ef46ca768497249c4c9dc67c8605e65e632157e77df8ba6f4ae403be25933569"} Nov 28 11:29:14 crc kubenswrapper[4923]: I1128 11:29:14.984474 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b6609a58-f8a7-4e87-8f95-c144d662c613","Type":"ContainerStarted","Data":"e92f22a262e724285f41ac0de5f4e14be40924ccbbeca567cbd9bcd1779b309b"} Nov 28 11:29:14 crc kubenswrapper[4923]: I1128 11:29:14.986045 4923 generic.go:334] "Generic (PLEG): container finished" podID="d9616fc3-155f-4546-a191-2bd6337a71a7" containerID="f2f4b931fe6b25b5a0b10bfe1ecad11069e795755d331f21d65b3ab8e9b3d841" exitCode=0 Nov 28 11:29:14 crc 
kubenswrapper[4923]: I1128 11:29:14.986096 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-v9nls" event={"ID":"d9616fc3-155f-4546-a191-2bd6337a71a7","Type":"ContainerDied","Data":"f2f4b931fe6b25b5a0b10bfe1ecad11069e795755d331f21d65b3ab8e9b3d841"} Nov 28 11:29:14 crc kubenswrapper[4923]: I1128 11:29:14.986110 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-v9nls" event={"ID":"d9616fc3-155f-4546-a191-2bd6337a71a7","Type":"ContainerStarted","Data":"13f942f615a3a7fd9cc6987f44876145df3e56a3745be7c365ea974b054738fc"} Nov 28 11:29:14 crc kubenswrapper[4923]: I1128 11:29:14.990054 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-e361-account-create-update-g2428" event={"ID":"8251c07b-8159-40bd-8f32-51d8b0c4568a","Type":"ContainerStarted","Data":"2c2cbe76ca8f2486db544ffd5af79c5c5163fc41ce33b20697626fe528913cb6"} Nov 28 11:29:14 crc kubenswrapper[4923]: I1128 11:29:14.990094 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-e361-account-create-update-g2428" event={"ID":"8251c07b-8159-40bd-8f32-51d8b0c4568a","Type":"ContainerStarted","Data":"f705c1e1d9f53a67483598602d981e041033e8a2216f2d2ca48d2f196732a4e9"} Nov 28 11:29:14 crc kubenswrapper[4923]: I1128 11:29:14.995187 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-eaa6-account-create-update-q5lzr" podStartSLOduration=1.995169371 podStartE2EDuration="1.995169371s" podCreationTimestamp="2025-11-28 11:29:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:29:14.987359423 +0000 UTC m=+1234.116043633" watchObservedRunningTime="2025-11-28 11:29:14.995169371 +0000 UTC m=+1234.123853581" Nov 28 11:29:15 crc kubenswrapper[4923]: I1128 11:29:15.038591 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-c848-account-create-update-cwvm5" podStartSLOduration=2.038573057 podStartE2EDuration="2.038573057s" podCreationTimestamp="2025-11-28 11:29:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:29:15.037998881 +0000 UTC m=+1234.166683091" watchObservedRunningTime="2025-11-28 11:29:15.038573057 +0000 UTC m=+1234.167257267" Nov 28 11:29:15 crc kubenswrapper[4923]: I1128 11:29:15.089576 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-e361-account-create-update-g2428" podStartSLOduration=2.089560094 podStartE2EDuration="2.089560094s" podCreationTimestamp="2025-11-28 11:29:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:29:15.082772784 +0000 UTC m=+1234.211457004" watchObservedRunningTime="2025-11-28 11:29:15.089560094 +0000 UTC m=+1234.218244304" Nov 28 11:29:15 crc kubenswrapper[4923]: I1128 11:29:15.177899 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a" path="/var/lib/kubelet/pods/e6eb74b5-f609-43e9-8b17-c7f72ca8ab6a/volumes" Nov 28 11:29:15 crc kubenswrapper[4923]: I1128 11:29:15.998127 4923 generic.go:334] "Generic (PLEG): container finished" podID="e3d9b34a-edae-4f44-b4e1-78b3ece44177" containerID="a3b5459607c6561ea92074c815ab5308bd2847c59c84bff0964a44dcd9d77c33" exitCode=0 Nov 28 11:29:15 crc 
kubenswrapper[4923]: I1128 11:29:15.998712 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-eaa6-account-create-update-q5lzr" event={"ID":"e3d9b34a-edae-4f44-b4e1-78b3ece44177","Type":"ContainerDied","Data":"a3b5459607c6561ea92074c815ab5308bd2847c59c84bff0964a44dcd9d77c33"} Nov 28 11:29:16 crc kubenswrapper[4923]: I1128 11:29:16.000457 4923 generic.go:334] "Generic (PLEG): container finished" podID="a6e7f7d2-60c3-4bf4-925e-06b4f92b333d" containerID="f32f3db62c773c399eff8e87a70356fd322b810129d7b02ef8f4f1ae002cab61" exitCode=0 Nov 28 11:29:16 crc kubenswrapper[4923]: I1128 11:29:16.000529 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-c848-account-create-update-cwvm5" event={"ID":"a6e7f7d2-60c3-4bf4-925e-06b4f92b333d","Type":"ContainerDied","Data":"f32f3db62c773c399eff8e87a70356fd322b810129d7b02ef8f4f1ae002cab61"} Nov 28 11:29:16 crc kubenswrapper[4923]: I1128 11:29:16.002452 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b6609a58-f8a7-4e87-8f95-c144d662c613","Type":"ContainerStarted","Data":"7d19ba288e4dd082e7a5f8d1fbc6ccc3b978a661c281c8e6563aa643ec662cb3"} Nov 28 11:29:16 crc kubenswrapper[4923]: I1128 11:29:16.003811 4923 generic.go:334] "Generic (PLEG): container finished" podID="8251c07b-8159-40bd-8f32-51d8b0c4568a" containerID="2c2cbe76ca8f2486db544ffd5af79c5c5163fc41ce33b20697626fe528913cb6" exitCode=0 Nov 28 11:29:16 crc kubenswrapper[4923]: I1128 11:29:16.004079 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-e361-account-create-update-g2428" event={"ID":"8251c07b-8159-40bd-8f32-51d8b0c4568a","Type":"ContainerDied","Data":"2c2cbe76ca8f2486db544ffd5af79c5c5163fc41ce33b20697626fe528913cb6"} Nov 28 11:29:16 crc kubenswrapper[4923]: I1128 11:29:16.463331 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-nfvs8" Nov 28 11:29:16 crc kubenswrapper[4923]: I1128 11:29:16.486313 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-v9nls" Nov 28 11:29:16 crc kubenswrapper[4923]: I1128 11:29:16.512057 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-s4rht" Nov 28 11:29:16 crc kubenswrapper[4923]: I1128 11:29:16.631252 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d9616fc3-155f-4546-a191-2bd6337a71a7-operator-scripts\") pod \"d9616fc3-155f-4546-a191-2bd6337a71a7\" (UID: \"d9616fc3-155f-4546-a191-2bd6337a71a7\") " Nov 28 11:29:16 crc kubenswrapper[4923]: I1128 11:29:16.631296 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xc7pq\" (UniqueName: \"kubernetes.io/projected/0884198d-4d47-4e53-8bb6-ea2e8365cadd-kube-api-access-xc7pq\") pod \"0884198d-4d47-4e53-8bb6-ea2e8365cadd\" (UID: \"0884198d-4d47-4e53-8bb6-ea2e8365cadd\") " Nov 28 11:29:16 crc kubenswrapper[4923]: I1128 11:29:16.631334 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0884198d-4d47-4e53-8bb6-ea2e8365cadd-operator-scripts\") pod \"0884198d-4d47-4e53-8bb6-ea2e8365cadd\" (UID: \"0884198d-4d47-4e53-8bb6-ea2e8365cadd\") " Nov 28 11:29:16 crc kubenswrapper[4923]: I1128 11:29:16.631378 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/28e2a9c4-1fc5-42d5-9afe-63d569d58db4-operator-scripts\") pod \"28e2a9c4-1fc5-42d5-9afe-63d569d58db4\" (UID: \"28e2a9c4-1fc5-42d5-9afe-63d569d58db4\") " Nov 28 11:29:16 crc kubenswrapper[4923]: I1128 11:29:16.631409 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hpn22\" (UniqueName: \"kubernetes.io/projected/28e2a9c4-1fc5-42d5-9afe-63d569d58db4-kube-api-access-hpn22\") pod \"28e2a9c4-1fc5-42d5-9afe-63d569d58db4\" (UID: \"28e2a9c4-1fc5-42d5-9afe-63d569d58db4\") " Nov 28 11:29:16 crc kubenswrapper[4923]: I1128 11:29:16.631448 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fwjhk\" (UniqueName: \"kubernetes.io/projected/d9616fc3-155f-4546-a191-2bd6337a71a7-kube-api-access-fwjhk\") pod \"d9616fc3-155f-4546-a191-2bd6337a71a7\" (UID: \"d9616fc3-155f-4546-a191-2bd6337a71a7\") " Nov 28 11:29:16 crc kubenswrapper[4923]: I1128 11:29:16.631817 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/28e2a9c4-1fc5-42d5-9afe-63d569d58db4-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "28e2a9c4-1fc5-42d5-9afe-63d569d58db4" (UID: "28e2a9c4-1fc5-42d5-9afe-63d569d58db4"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:29:16 crc kubenswrapper[4923]: I1128 11:29:16.632181 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0884198d-4d47-4e53-8bb6-ea2e8365cadd-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "0884198d-4d47-4e53-8bb6-ea2e8365cadd" (UID: "0884198d-4d47-4e53-8bb6-ea2e8365cadd"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:29:16 crc kubenswrapper[4923]: I1128 11:29:16.632873 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d9616fc3-155f-4546-a191-2bd6337a71a7-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "d9616fc3-155f-4546-a191-2bd6337a71a7" (UID: "d9616fc3-155f-4546-a191-2bd6337a71a7"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:29:16 crc kubenswrapper[4923]: I1128 11:29:16.636870 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0884198d-4d47-4e53-8bb6-ea2e8365cadd-kube-api-access-xc7pq" (OuterVolumeSpecName: "kube-api-access-xc7pq") pod "0884198d-4d47-4e53-8bb6-ea2e8365cadd" (UID: "0884198d-4d47-4e53-8bb6-ea2e8365cadd"). InnerVolumeSpecName "kube-api-access-xc7pq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:29:16 crc kubenswrapper[4923]: I1128 11:29:16.637275 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/28e2a9c4-1fc5-42d5-9afe-63d569d58db4-kube-api-access-hpn22" (OuterVolumeSpecName: "kube-api-access-hpn22") pod "28e2a9c4-1fc5-42d5-9afe-63d569d58db4" (UID: "28e2a9c4-1fc5-42d5-9afe-63d569d58db4"). InnerVolumeSpecName "kube-api-access-hpn22". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:29:16 crc kubenswrapper[4923]: I1128 11:29:16.638123 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d9616fc3-155f-4546-a191-2bd6337a71a7-kube-api-access-fwjhk" (OuterVolumeSpecName: "kube-api-access-fwjhk") pod "d9616fc3-155f-4546-a191-2bd6337a71a7" (UID: "d9616fc3-155f-4546-a191-2bd6337a71a7"). InnerVolumeSpecName "kube-api-access-fwjhk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:29:16 crc kubenswrapper[4923]: I1128 11:29:16.732810 4923 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d9616fc3-155f-4546-a191-2bd6337a71a7-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 11:29:16 crc kubenswrapper[4923]: I1128 11:29:16.732842 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xc7pq\" (UniqueName: \"kubernetes.io/projected/0884198d-4d47-4e53-8bb6-ea2e8365cadd-kube-api-access-xc7pq\") on node \"crc\" DevicePath \"\"" Nov 28 11:29:16 crc kubenswrapper[4923]: I1128 11:29:16.732855 4923 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0884198d-4d47-4e53-8bb6-ea2e8365cadd-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 11:29:16 crc kubenswrapper[4923]: I1128 11:29:16.732864 4923 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/28e2a9c4-1fc5-42d5-9afe-63d569d58db4-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 11:29:16 crc kubenswrapper[4923]: I1128 11:29:16.732872 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hpn22\" (UniqueName: \"kubernetes.io/projected/28e2a9c4-1fc5-42d5-9afe-63d569d58db4-kube-api-access-hpn22\") on node \"crc\" DevicePath \"\"" Nov 28 11:29:16 crc kubenswrapper[4923]: I1128 11:29:16.732880 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fwjhk\" (UniqueName: \"kubernetes.io/projected/d9616fc3-155f-4546-a191-2bd6337a71a7-kube-api-access-fwjhk\") on node \"crc\" DevicePath \"\"" Nov 28 11:29:17 crc kubenswrapper[4923]: I1128 11:29:17.069895 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-v9nls" event={"ID":"d9616fc3-155f-4546-a191-2bd6337a71a7","Type":"ContainerDied","Data":"13f942f615a3a7fd9cc6987f44876145df3e56a3745be7c365ea974b054738fc"} Nov 28 11:29:17 crc kubenswrapper[4923]: I1128 11:29:17.070179 4923 pod_container_deletor.go:80] "Container not found in pod's 
containers" containerID="13f942f615a3a7fd9cc6987f44876145df3e56a3745be7c365ea974b054738fc" Nov 28 11:29:17 crc kubenswrapper[4923]: I1128 11:29:17.070047 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-v9nls" Nov 28 11:29:17 crc kubenswrapper[4923]: I1128 11:29:17.103260 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 11:29:17 crc kubenswrapper[4923]: I1128 11:29:17.117361 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-nfvs8" event={"ID":"28e2a9c4-1fc5-42d5-9afe-63d569d58db4","Type":"ContainerDied","Data":"45ef44b22ab3b44706973431154dc250ca66e89758df2e0347249592ae55f387"} Nov 28 11:29:17 crc kubenswrapper[4923]: I1128 11:29:17.117399 4923 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="45ef44b22ab3b44706973431154dc250ca66e89758df2e0347249592ae55f387" Nov 28 11:29:17 crc kubenswrapper[4923]: I1128 11:29:17.117467 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-nfvs8" Nov 28 11:29:17 crc kubenswrapper[4923]: I1128 11:29:17.137519 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-s4rht" event={"ID":"0884198d-4d47-4e53-8bb6-ea2e8365cadd","Type":"ContainerDied","Data":"61295a08eb903c2cc24d7a1b6708a6ed3024ab2223207ba1fb2987ab0e072114"} Nov 28 11:29:17 crc kubenswrapper[4923]: I1128 11:29:17.137557 4923 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="61295a08eb903c2cc24d7a1b6708a6ed3024ab2223207ba1fb2987ab0e072114" Nov 28 11:29:17 crc kubenswrapper[4923]: I1128 11:29:17.137659 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-s4rht" Nov 28 11:29:17 crc kubenswrapper[4923]: I1128 11:29:17.151125 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b6609a58-f8a7-4e87-8f95-c144d662c613","Type":"ContainerStarted","Data":"2c6af17f3a0b4920648006f5dafab8a6c719ec07a8ec9a90734a36eafdba2dfc"} Nov 28 11:29:17 crc kubenswrapper[4923]: I1128 11:29:17.570502 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-c848-account-create-update-cwvm5" Nov 28 11:29:17 crc kubenswrapper[4923]: I1128 11:29:17.659792 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-e361-account-create-update-g2428" Nov 28 11:29:17 crc kubenswrapper[4923]: I1128 11:29:17.662762 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a6e7f7d2-60c3-4bf4-925e-06b4f92b333d-operator-scripts\") pod \"a6e7f7d2-60c3-4bf4-925e-06b4f92b333d\" (UID: \"a6e7f7d2-60c3-4bf4-925e-06b4f92b333d\") " Nov 28 11:29:17 crc kubenswrapper[4923]: I1128 11:29:17.662900 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kqfz5\" (UniqueName: \"kubernetes.io/projected/a6e7f7d2-60c3-4bf4-925e-06b4f92b333d-kube-api-access-kqfz5\") pod \"a6e7f7d2-60c3-4bf4-925e-06b4f92b333d\" (UID: \"a6e7f7d2-60c3-4bf4-925e-06b4f92b333d\") " Nov 28 11:29:17 crc kubenswrapper[4923]: I1128 11:29:17.663839 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a6e7f7d2-60c3-4bf4-925e-06b4f92b333d-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "a6e7f7d2-60c3-4bf4-925e-06b4f92b333d" (UID: "a6e7f7d2-60c3-4bf4-925e-06b4f92b333d"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:29:17 crc kubenswrapper[4923]: I1128 11:29:17.664321 4923 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a6e7f7d2-60c3-4bf4-925e-06b4f92b333d-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 11:29:17 crc kubenswrapper[4923]: I1128 11:29:17.673084 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a6e7f7d2-60c3-4bf4-925e-06b4f92b333d-kube-api-access-kqfz5" (OuterVolumeSpecName: "kube-api-access-kqfz5") pod "a6e7f7d2-60c3-4bf4-925e-06b4f92b333d" (UID: "a6e7f7d2-60c3-4bf4-925e-06b4f92b333d"). InnerVolumeSpecName "kube-api-access-kqfz5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:29:17 crc kubenswrapper[4923]: I1128 11:29:17.743312 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-eaa6-account-create-update-q5lzr" Nov 28 11:29:17 crc kubenswrapper[4923]: I1128 11:29:17.764953 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xltgb\" (UniqueName: \"kubernetes.io/projected/8251c07b-8159-40bd-8f32-51d8b0c4568a-kube-api-access-xltgb\") pod \"8251c07b-8159-40bd-8f32-51d8b0c4568a\" (UID: \"8251c07b-8159-40bd-8f32-51d8b0c4568a\") " Nov 28 11:29:17 crc kubenswrapper[4923]: I1128 11:29:17.765354 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8251c07b-8159-40bd-8f32-51d8b0c4568a-operator-scripts\") pod \"8251c07b-8159-40bd-8f32-51d8b0c4568a\" (UID: \"8251c07b-8159-40bd-8f32-51d8b0c4568a\") " Nov 28 11:29:17 crc kubenswrapper[4923]: I1128 11:29:17.765805 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kqfz5\" (UniqueName: \"kubernetes.io/projected/a6e7f7d2-60c3-4bf4-925e-06b4f92b333d-kube-api-access-kqfz5\") on node \"crc\" DevicePath \"\"" Nov 28 11:29:17 crc kubenswrapper[4923]: I1128 11:29:17.766417 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8251c07b-8159-40bd-8f32-51d8b0c4568a-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "8251c07b-8159-40bd-8f32-51d8b0c4568a" (UID: "8251c07b-8159-40bd-8f32-51d8b0c4568a"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:29:17 crc kubenswrapper[4923]: I1128 11:29:17.769232 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8251c07b-8159-40bd-8f32-51d8b0c4568a-kube-api-access-xltgb" (OuterVolumeSpecName: "kube-api-access-xltgb") pod "8251c07b-8159-40bd-8f32-51d8b0c4568a" (UID: "8251c07b-8159-40bd-8f32-51d8b0c4568a"). InnerVolumeSpecName "kube-api-access-xltgb". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:29:17 crc kubenswrapper[4923]: I1128 11:29:17.866501 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e3d9b34a-edae-4f44-b4e1-78b3ece44177-operator-scripts\") pod \"e3d9b34a-edae-4f44-b4e1-78b3ece44177\" (UID: \"e3d9b34a-edae-4f44-b4e1-78b3ece44177\") " Nov 28 11:29:17 crc kubenswrapper[4923]: I1128 11:29:17.866743 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6sj28\" (UniqueName: \"kubernetes.io/projected/e3d9b34a-edae-4f44-b4e1-78b3ece44177-kube-api-access-6sj28\") pod \"e3d9b34a-edae-4f44-b4e1-78b3ece44177\" (UID: \"e3d9b34a-edae-4f44-b4e1-78b3ece44177\") " Nov 28 11:29:17 crc kubenswrapper[4923]: I1128 11:29:17.867058 4923 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8251c07b-8159-40bd-8f32-51d8b0c4568a-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 11:29:17 crc kubenswrapper[4923]: I1128 11:29:17.867074 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xltgb\" (UniqueName: \"kubernetes.io/projected/8251c07b-8159-40bd-8f32-51d8b0c4568a-kube-api-access-xltgb\") on node \"crc\" DevicePath \"\"" Nov 28 11:29:17 crc kubenswrapper[4923]: I1128 11:29:17.867745 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e3d9b34a-edae-4f44-b4e1-78b3ece44177-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "e3d9b34a-edae-4f44-b4e1-78b3ece44177" (UID: "e3d9b34a-edae-4f44-b4e1-78b3ece44177"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:29:17 crc kubenswrapper[4923]: I1128 11:29:17.869533 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e3d9b34a-edae-4f44-b4e1-78b3ece44177-kube-api-access-6sj28" (OuterVolumeSpecName: "kube-api-access-6sj28") pod "e3d9b34a-edae-4f44-b4e1-78b3ece44177" (UID: "e3d9b34a-edae-4f44-b4e1-78b3ece44177"). InnerVolumeSpecName "kube-api-access-6sj28". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:29:17 crc kubenswrapper[4923]: I1128 11:29:17.968607 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6sj28\" (UniqueName: \"kubernetes.io/projected/e3d9b34a-edae-4f44-b4e1-78b3ece44177-kube-api-access-6sj28\") on node \"crc\" DevicePath \"\"" Nov 28 11:29:17 crc kubenswrapper[4923]: I1128 11:29:17.968634 4923 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e3d9b34a-edae-4f44-b4e1-78b3ece44177-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 11:29:18 crc kubenswrapper[4923]: I1128 11:29:18.159522 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-e361-account-create-update-g2428" event={"ID":"8251c07b-8159-40bd-8f32-51d8b0c4568a","Type":"ContainerDied","Data":"f705c1e1d9f53a67483598602d981e041033e8a2216f2d2ca48d2f196732a4e9"} Nov 28 11:29:18 crc kubenswrapper[4923]: I1128 11:29:18.159816 4923 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f705c1e1d9f53a67483598602d981e041033e8a2216f2d2ca48d2f196732a4e9" Nov 28 11:29:18 crc kubenswrapper[4923]: I1128 11:29:18.159536 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-e361-account-create-update-g2428" Nov 28 11:29:18 crc kubenswrapper[4923]: I1128 11:29:18.162206 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-eaa6-account-create-update-q5lzr" event={"ID":"e3d9b34a-edae-4f44-b4e1-78b3ece44177","Type":"ContainerDied","Data":"e711772b2b1ba383952f3f5efc540a06922c3d8371b118a1300d29bfa9ab4272"} Nov 28 11:29:18 crc kubenswrapper[4923]: I1128 11:29:18.162228 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-eaa6-account-create-update-q5lzr" Nov 28 11:29:18 crc kubenswrapper[4923]: I1128 11:29:18.162246 4923 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e711772b2b1ba383952f3f5efc540a06922c3d8371b118a1300d29bfa9ab4272" Nov 28 11:29:18 crc kubenswrapper[4923]: I1128 11:29:18.163923 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-c848-account-create-update-cwvm5" Nov 28 11:29:18 crc kubenswrapper[4923]: I1128 11:29:18.163957 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-c848-account-create-update-cwvm5" event={"ID":"a6e7f7d2-60c3-4bf4-925e-06b4f92b333d","Type":"ContainerDied","Data":"82476167b0bdccc5ada8d91eb15d8e43d336c2132594989903db8d6d5b303c15"} Nov 28 11:29:18 crc kubenswrapper[4923]: I1128 11:29:18.163990 4923 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="82476167b0bdccc5ada8d91eb15d8e43d336c2132594989903db8d6d5b303c15" Nov 28 11:29:18 crc kubenswrapper[4923]: I1128 11:29:18.168126 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b6609a58-f8a7-4e87-8f95-c144d662c613","Type":"ContainerStarted","Data":"861ad6e47e57496510b5fc350abc6ed8089d27d09627189345c10d3d875ef3ce"} Nov 28 11:29:18 crc kubenswrapper[4923]: I1128 11:29:18.168271 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b6609a58-f8a7-4e87-8f95-c144d662c613" containerName="ceilometer-central-agent" containerID="cri-o://ef46ca768497249c4c9dc67c8605e65e632157e77df8ba6f4ae403be25933569" gracePeriod=30 Nov 28 11:29:18 crc kubenswrapper[4923]: I1128 11:29:18.168515 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 28 11:29:18 crc kubenswrapper[4923]: I1128 11:29:18.168741 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b6609a58-f8a7-4e87-8f95-c144d662c613" containerName="proxy-httpd" containerID="cri-o://861ad6e47e57496510b5fc350abc6ed8089d27d09627189345c10d3d875ef3ce" gracePeriod=30 Nov 28 11:29:18 crc kubenswrapper[4923]: I1128 11:29:18.168788 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b6609a58-f8a7-4e87-8f95-c144d662c613" containerName="sg-core" containerID="cri-o://2c6af17f3a0b4920648006f5dafab8a6c719ec07a8ec9a90734a36eafdba2dfc" gracePeriod=30 Nov 28 11:29:18 crc kubenswrapper[4923]: I1128 11:29:18.168820 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b6609a58-f8a7-4e87-8f95-c144d662c613" containerName="ceilometer-notification-agent" containerID="cri-o://7d19ba288e4dd082e7a5f8d1fbc6ccc3b978a661c281c8e6563aa643ec662cb3" gracePeriod=30 Nov 28 11:29:18 crc kubenswrapper[4923]: I1128 11:29:18.199151 4923 pod_startup_latency_tracker.go:104] 
"Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.034441391 podStartE2EDuration="5.199133626s" podCreationTimestamp="2025-11-28 11:29:13 +0000 UTC" firstStartedPulling="2025-11-28 11:29:14.239329172 +0000 UTC m=+1233.368013372" lastFinishedPulling="2025-11-28 11:29:17.404021397 +0000 UTC m=+1236.532705607" observedRunningTime="2025-11-28 11:29:18.194863797 +0000 UTC m=+1237.323548007" watchObservedRunningTime="2025-11-28 11:29:18.199133626 +0000 UTC m=+1237.327817836" Nov 28 11:29:18 crc kubenswrapper[4923]: I1128 11:29:18.770660 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 11:29:18 crc kubenswrapper[4923]: I1128 11:29:18.885920 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b6609a58-f8a7-4e87-8f95-c144d662c613-sg-core-conf-yaml\") pod \"b6609a58-f8a7-4e87-8f95-c144d662c613\" (UID: \"b6609a58-f8a7-4e87-8f95-c144d662c613\") " Nov 28 11:29:18 crc kubenswrapper[4923]: I1128 11:29:18.886068 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b6609a58-f8a7-4e87-8f95-c144d662c613-scripts\") pod \"b6609a58-f8a7-4e87-8f95-c144d662c613\" (UID: \"b6609a58-f8a7-4e87-8f95-c144d662c613\") " Nov 28 11:29:18 crc kubenswrapper[4923]: I1128 11:29:18.886101 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b6609a58-f8a7-4e87-8f95-c144d662c613-config-data\") pod \"b6609a58-f8a7-4e87-8f95-c144d662c613\" (UID: \"b6609a58-f8a7-4e87-8f95-c144d662c613\") " Nov 28 11:29:18 crc kubenswrapper[4923]: I1128 11:29:18.886157 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b6609a58-f8a7-4e87-8f95-c144d662c613-run-httpd\") pod \"b6609a58-f8a7-4e87-8f95-c144d662c613\" (UID: \"b6609a58-f8a7-4e87-8f95-c144d662c613\") " Nov 28 11:29:18 crc kubenswrapper[4923]: I1128 11:29:18.886199 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b6609a58-f8a7-4e87-8f95-c144d662c613-log-httpd\") pod \"b6609a58-f8a7-4e87-8f95-c144d662c613\" (UID: \"b6609a58-f8a7-4e87-8f95-c144d662c613\") " Nov 28 11:29:18 crc kubenswrapper[4923]: I1128 11:29:18.886236 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-snmkg\" (UniqueName: \"kubernetes.io/projected/b6609a58-f8a7-4e87-8f95-c144d662c613-kube-api-access-snmkg\") pod \"b6609a58-f8a7-4e87-8f95-c144d662c613\" (UID: \"b6609a58-f8a7-4e87-8f95-c144d662c613\") " Nov 28 11:29:18 crc kubenswrapper[4923]: I1128 11:29:18.886254 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6609a58-f8a7-4e87-8f95-c144d662c613-combined-ca-bundle\") pod \"b6609a58-f8a7-4e87-8f95-c144d662c613\" (UID: \"b6609a58-f8a7-4e87-8f95-c144d662c613\") " Nov 28 11:29:18 crc kubenswrapper[4923]: I1128 11:29:18.886531 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b6609a58-f8a7-4e87-8f95-c144d662c613-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "b6609a58-f8a7-4e87-8f95-c144d662c613" (UID: "b6609a58-f8a7-4e87-8f95-c144d662c613"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:29:18 crc kubenswrapper[4923]: I1128 11:29:18.886665 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b6609a58-f8a7-4e87-8f95-c144d662c613-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "b6609a58-f8a7-4e87-8f95-c144d662c613" (UID: "b6609a58-f8a7-4e87-8f95-c144d662c613"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:29:18 crc kubenswrapper[4923]: I1128 11:29:18.887084 4923 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b6609a58-f8a7-4e87-8f95-c144d662c613-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 11:29:18 crc kubenswrapper[4923]: I1128 11:29:18.887100 4923 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b6609a58-f8a7-4e87-8f95-c144d662c613-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 11:29:18 crc kubenswrapper[4923]: I1128 11:29:18.889680 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6609a58-f8a7-4e87-8f95-c144d662c613-scripts" (OuterVolumeSpecName: "scripts") pod "b6609a58-f8a7-4e87-8f95-c144d662c613" (UID: "b6609a58-f8a7-4e87-8f95-c144d662c613"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:29:18 crc kubenswrapper[4923]: I1128 11:29:18.891919 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6609a58-f8a7-4e87-8f95-c144d662c613-kube-api-access-snmkg" (OuterVolumeSpecName: "kube-api-access-snmkg") pod "b6609a58-f8a7-4e87-8f95-c144d662c613" (UID: "b6609a58-f8a7-4e87-8f95-c144d662c613"). InnerVolumeSpecName "kube-api-access-snmkg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:29:18 crc kubenswrapper[4923]: I1128 11:29:18.923228 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6609a58-f8a7-4e87-8f95-c144d662c613-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "b6609a58-f8a7-4e87-8f95-c144d662c613" (UID: "b6609a58-f8a7-4e87-8f95-c144d662c613"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:29:18 crc kubenswrapper[4923]: I1128 11:29:18.968353 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6609a58-f8a7-4e87-8f95-c144d662c613-config-data" (OuterVolumeSpecName: "config-data") pod "b6609a58-f8a7-4e87-8f95-c144d662c613" (UID: "b6609a58-f8a7-4e87-8f95-c144d662c613"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:29:18 crc kubenswrapper[4923]: I1128 11:29:18.971125 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6609a58-f8a7-4e87-8f95-c144d662c613-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b6609a58-f8a7-4e87-8f95-c144d662c613" (UID: "b6609a58-f8a7-4e87-8f95-c144d662c613"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:29:18 crc kubenswrapper[4923]: I1128 11:29:18.989767 4923 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b6609a58-f8a7-4e87-8f95-c144d662c613-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 11:29:18 crc kubenswrapper[4923]: I1128 11:29:18.990169 4923 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b6609a58-f8a7-4e87-8f95-c144d662c613-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 11:29:18 crc kubenswrapper[4923]: I1128 11:29:18.990180 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-snmkg\" (UniqueName: \"kubernetes.io/projected/b6609a58-f8a7-4e87-8f95-c144d662c613-kube-api-access-snmkg\") on node \"crc\" DevicePath \"\"" Nov 28 11:29:18 crc kubenswrapper[4923]: I1128 11:29:18.990191 4923 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6609a58-f8a7-4e87-8f95-c144d662c613-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 11:29:18 crc kubenswrapper[4923]: I1128 11:29:18.990211 4923 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b6609a58-f8a7-4e87-8f95-c144d662c613-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.182634 4923 generic.go:334] "Generic (PLEG): container finished" podID="b6609a58-f8a7-4e87-8f95-c144d662c613" containerID="861ad6e47e57496510b5fc350abc6ed8089d27d09627189345c10d3d875ef3ce" exitCode=0 Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.182658 4923 generic.go:334] "Generic (PLEG): container finished" podID="b6609a58-f8a7-4e87-8f95-c144d662c613" containerID="2c6af17f3a0b4920648006f5dafab8a6c719ec07a8ec9a90734a36eafdba2dfc" exitCode=2 Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.182668 4923 generic.go:334] "Generic (PLEG): container finished" podID="b6609a58-f8a7-4e87-8f95-c144d662c613" containerID="7d19ba288e4dd082e7a5f8d1fbc6ccc3b978a661c281c8e6563aa643ec662cb3" exitCode=0 Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.182675 4923 generic.go:334] "Generic (PLEG): container finished" podID="b6609a58-f8a7-4e87-8f95-c144d662c613" containerID="ef46ca768497249c4c9dc67c8605e65e632157e77df8ba6f4ae403be25933569" exitCode=0 Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.182666 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b6609a58-f8a7-4e87-8f95-c144d662c613","Type":"ContainerDied","Data":"861ad6e47e57496510b5fc350abc6ed8089d27d09627189345c10d3d875ef3ce"} Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.182703 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b6609a58-f8a7-4e87-8f95-c144d662c613","Type":"ContainerDied","Data":"2c6af17f3a0b4920648006f5dafab8a6c719ec07a8ec9a90734a36eafdba2dfc"} Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.182716 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b6609a58-f8a7-4e87-8f95-c144d662c613","Type":"ContainerDied","Data":"7d19ba288e4dd082e7a5f8d1fbc6ccc3b978a661c281c8e6563aa643ec662cb3"} Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.182726 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"b6609a58-f8a7-4e87-8f95-c144d662c613","Type":"ContainerDied","Data":"ef46ca768497249c4c9dc67c8605e65e632157e77df8ba6f4ae403be25933569"} Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.182725 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.182745 4923 scope.go:117] "RemoveContainer" containerID="861ad6e47e57496510b5fc350abc6ed8089d27d09627189345c10d3d875ef3ce" Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.182736 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b6609a58-f8a7-4e87-8f95-c144d662c613","Type":"ContainerDied","Data":"e92f22a262e724285f41ac0de5f4e14be40924ccbbeca567cbd9bcd1779b309b"} Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.210282 4923 scope.go:117] "RemoveContainer" containerID="2c6af17f3a0b4920648006f5dafab8a6c719ec07a8ec9a90734a36eafdba2dfc" Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.220574 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.225472 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.241689 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 28 11:29:19 crc kubenswrapper[4923]: E1128 11:29:19.243421 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b6609a58-f8a7-4e87-8f95-c144d662c613" containerName="proxy-httpd" Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.243443 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="b6609a58-f8a7-4e87-8f95-c144d662c613" containerName="proxy-httpd" Nov 28 11:29:19 crc kubenswrapper[4923]: E1128 11:29:19.243457 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a6e7f7d2-60c3-4bf4-925e-06b4f92b333d" containerName="mariadb-account-create-update" Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.243464 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="a6e7f7d2-60c3-4bf4-925e-06b4f92b333d" containerName="mariadb-account-create-update" Nov 28 11:29:19 crc kubenswrapper[4923]: E1128 11:29:19.243477 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0884198d-4d47-4e53-8bb6-ea2e8365cadd" containerName="mariadb-database-create" Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.243484 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="0884198d-4d47-4e53-8bb6-ea2e8365cadd" containerName="mariadb-database-create" Nov 28 11:29:19 crc kubenswrapper[4923]: E1128 11:29:19.243494 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b6609a58-f8a7-4e87-8f95-c144d662c613" containerName="ceilometer-notification-agent" Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.243500 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="b6609a58-f8a7-4e87-8f95-c144d662c613" containerName="ceilometer-notification-agent" Nov 28 11:29:19 crc kubenswrapper[4923]: E1128 11:29:19.243511 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b6609a58-f8a7-4e87-8f95-c144d662c613" containerName="sg-core" Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.243517 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="b6609a58-f8a7-4e87-8f95-c144d662c613" containerName="sg-core" Nov 28 11:29:19 crc kubenswrapper[4923]: E1128 11:29:19.243529 4923 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="8251c07b-8159-40bd-8f32-51d8b0c4568a" containerName="mariadb-account-create-update" Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.243535 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="8251c07b-8159-40bd-8f32-51d8b0c4568a" containerName="mariadb-account-create-update" Nov 28 11:29:19 crc kubenswrapper[4923]: E1128 11:29:19.243545 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d9616fc3-155f-4546-a191-2bd6337a71a7" containerName="mariadb-database-create" Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.243552 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="d9616fc3-155f-4546-a191-2bd6337a71a7" containerName="mariadb-database-create" Nov 28 11:29:19 crc kubenswrapper[4923]: E1128 11:29:19.243559 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="28e2a9c4-1fc5-42d5-9afe-63d569d58db4" containerName="mariadb-database-create" Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.243566 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="28e2a9c4-1fc5-42d5-9afe-63d569d58db4" containerName="mariadb-database-create" Nov 28 11:29:19 crc kubenswrapper[4923]: E1128 11:29:19.243574 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e3d9b34a-edae-4f44-b4e1-78b3ece44177" containerName="mariadb-account-create-update" Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.243580 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="e3d9b34a-edae-4f44-b4e1-78b3ece44177" containerName="mariadb-account-create-update" Nov 28 11:29:19 crc kubenswrapper[4923]: E1128 11:29:19.243590 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b6609a58-f8a7-4e87-8f95-c144d662c613" containerName="ceilometer-central-agent" Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.243595 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="b6609a58-f8a7-4e87-8f95-c144d662c613" containerName="ceilometer-central-agent" Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.243750 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="0884198d-4d47-4e53-8bb6-ea2e8365cadd" containerName="mariadb-database-create" Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.243762 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="28e2a9c4-1fc5-42d5-9afe-63d569d58db4" containerName="mariadb-database-create" Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.243769 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="b6609a58-f8a7-4e87-8f95-c144d662c613" containerName="ceilometer-central-agent" Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.243783 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="a6e7f7d2-60c3-4bf4-925e-06b4f92b333d" containerName="mariadb-account-create-update" Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.243795 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="d9616fc3-155f-4546-a191-2bd6337a71a7" containerName="mariadb-database-create" Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.243803 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="e3d9b34a-edae-4f44-b4e1-78b3ece44177" containerName="mariadb-account-create-update" Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.243813 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="8251c07b-8159-40bd-8f32-51d8b0c4568a" containerName="mariadb-account-create-update" Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 
11:29:19.244580 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="b6609a58-f8a7-4e87-8f95-c144d662c613" containerName="ceilometer-notification-agent" Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.244599 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="b6609a58-f8a7-4e87-8f95-c144d662c613" containerName="proxy-httpd" Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.244611 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="b6609a58-f8a7-4e87-8f95-c144d662c613" containerName="sg-core" Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.246269 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.249195 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.249485 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.256311 4923 scope.go:117] "RemoveContainer" containerID="7d19ba288e4dd082e7a5f8d1fbc6ccc3b978a661c281c8e6563aa643ec662cb3" Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.260458 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.296783 4923 scope.go:117] "RemoveContainer" containerID="ef46ca768497249c4c9dc67c8605e65e632157e77df8ba6f4ae403be25933569" Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.312095 4923 scope.go:117] "RemoveContainer" containerID="861ad6e47e57496510b5fc350abc6ed8089d27d09627189345c10d3d875ef3ce" Nov 28 11:29:19 crc kubenswrapper[4923]: E1128 11:29:19.312605 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"861ad6e47e57496510b5fc350abc6ed8089d27d09627189345c10d3d875ef3ce\": container with ID starting with 861ad6e47e57496510b5fc350abc6ed8089d27d09627189345c10d3d875ef3ce not found: ID does not exist" containerID="861ad6e47e57496510b5fc350abc6ed8089d27d09627189345c10d3d875ef3ce" Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.312645 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"861ad6e47e57496510b5fc350abc6ed8089d27d09627189345c10d3d875ef3ce"} err="failed to get container status \"861ad6e47e57496510b5fc350abc6ed8089d27d09627189345c10d3d875ef3ce\": rpc error: code = NotFound desc = could not find container \"861ad6e47e57496510b5fc350abc6ed8089d27d09627189345c10d3d875ef3ce\": container with ID starting with 861ad6e47e57496510b5fc350abc6ed8089d27d09627189345c10d3d875ef3ce not found: ID does not exist" Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.312672 4923 scope.go:117] "RemoveContainer" containerID="2c6af17f3a0b4920648006f5dafab8a6c719ec07a8ec9a90734a36eafdba2dfc" Nov 28 11:29:19 crc kubenswrapper[4923]: E1128 11:29:19.313400 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2c6af17f3a0b4920648006f5dafab8a6c719ec07a8ec9a90734a36eafdba2dfc\": container with ID starting with 2c6af17f3a0b4920648006f5dafab8a6c719ec07a8ec9a90734a36eafdba2dfc not found: ID does not exist" containerID="2c6af17f3a0b4920648006f5dafab8a6c719ec07a8ec9a90734a36eafdba2dfc" Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.313449 4923 pod_container_deletor.go:53] 
"DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2c6af17f3a0b4920648006f5dafab8a6c719ec07a8ec9a90734a36eafdba2dfc"} err="failed to get container status \"2c6af17f3a0b4920648006f5dafab8a6c719ec07a8ec9a90734a36eafdba2dfc\": rpc error: code = NotFound desc = could not find container \"2c6af17f3a0b4920648006f5dafab8a6c719ec07a8ec9a90734a36eafdba2dfc\": container with ID starting with 2c6af17f3a0b4920648006f5dafab8a6c719ec07a8ec9a90734a36eafdba2dfc not found: ID does not exist" Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.313475 4923 scope.go:117] "RemoveContainer" containerID="7d19ba288e4dd082e7a5f8d1fbc6ccc3b978a661c281c8e6563aa643ec662cb3" Nov 28 11:29:19 crc kubenswrapper[4923]: E1128 11:29:19.313786 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7d19ba288e4dd082e7a5f8d1fbc6ccc3b978a661c281c8e6563aa643ec662cb3\": container with ID starting with 7d19ba288e4dd082e7a5f8d1fbc6ccc3b978a661c281c8e6563aa643ec662cb3 not found: ID does not exist" containerID="7d19ba288e4dd082e7a5f8d1fbc6ccc3b978a661c281c8e6563aa643ec662cb3" Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.313819 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7d19ba288e4dd082e7a5f8d1fbc6ccc3b978a661c281c8e6563aa643ec662cb3"} err="failed to get container status \"7d19ba288e4dd082e7a5f8d1fbc6ccc3b978a661c281c8e6563aa643ec662cb3\": rpc error: code = NotFound desc = could not find container \"7d19ba288e4dd082e7a5f8d1fbc6ccc3b978a661c281c8e6563aa643ec662cb3\": container with ID starting with 7d19ba288e4dd082e7a5f8d1fbc6ccc3b978a661c281c8e6563aa643ec662cb3 not found: ID does not exist" Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.313841 4923 scope.go:117] "RemoveContainer" containerID="ef46ca768497249c4c9dc67c8605e65e632157e77df8ba6f4ae403be25933569" Nov 28 11:29:19 crc kubenswrapper[4923]: E1128 11:29:19.314171 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ef46ca768497249c4c9dc67c8605e65e632157e77df8ba6f4ae403be25933569\": container with ID starting with ef46ca768497249c4c9dc67c8605e65e632157e77df8ba6f4ae403be25933569 not found: ID does not exist" containerID="ef46ca768497249c4c9dc67c8605e65e632157e77df8ba6f4ae403be25933569" Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.314212 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ef46ca768497249c4c9dc67c8605e65e632157e77df8ba6f4ae403be25933569"} err="failed to get container status \"ef46ca768497249c4c9dc67c8605e65e632157e77df8ba6f4ae403be25933569\": rpc error: code = NotFound desc = could not find container \"ef46ca768497249c4c9dc67c8605e65e632157e77df8ba6f4ae403be25933569\": container with ID starting with ef46ca768497249c4c9dc67c8605e65e632157e77df8ba6f4ae403be25933569 not found: ID does not exist" Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.314231 4923 scope.go:117] "RemoveContainer" containerID="861ad6e47e57496510b5fc350abc6ed8089d27d09627189345c10d3d875ef3ce" Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.314477 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"861ad6e47e57496510b5fc350abc6ed8089d27d09627189345c10d3d875ef3ce"} err="failed to get container status \"861ad6e47e57496510b5fc350abc6ed8089d27d09627189345c10d3d875ef3ce\": rpc error: code = NotFound desc = could not find container 
\"861ad6e47e57496510b5fc350abc6ed8089d27d09627189345c10d3d875ef3ce\": container with ID starting with 861ad6e47e57496510b5fc350abc6ed8089d27d09627189345c10d3d875ef3ce not found: ID does not exist" Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.314514 4923 scope.go:117] "RemoveContainer" containerID="2c6af17f3a0b4920648006f5dafab8a6c719ec07a8ec9a90734a36eafdba2dfc" Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.314697 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2c6af17f3a0b4920648006f5dafab8a6c719ec07a8ec9a90734a36eafdba2dfc"} err="failed to get container status \"2c6af17f3a0b4920648006f5dafab8a6c719ec07a8ec9a90734a36eafdba2dfc\": rpc error: code = NotFound desc = could not find container \"2c6af17f3a0b4920648006f5dafab8a6c719ec07a8ec9a90734a36eafdba2dfc\": container with ID starting with 2c6af17f3a0b4920648006f5dafab8a6c719ec07a8ec9a90734a36eafdba2dfc not found: ID does not exist" Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.314711 4923 scope.go:117] "RemoveContainer" containerID="7d19ba288e4dd082e7a5f8d1fbc6ccc3b978a661c281c8e6563aa643ec662cb3" Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.314972 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7d19ba288e4dd082e7a5f8d1fbc6ccc3b978a661c281c8e6563aa643ec662cb3"} err="failed to get container status \"7d19ba288e4dd082e7a5f8d1fbc6ccc3b978a661c281c8e6563aa643ec662cb3\": rpc error: code = NotFound desc = could not find container \"7d19ba288e4dd082e7a5f8d1fbc6ccc3b978a661c281c8e6563aa643ec662cb3\": container with ID starting with 7d19ba288e4dd082e7a5f8d1fbc6ccc3b978a661c281c8e6563aa643ec662cb3 not found: ID does not exist" Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.314991 4923 scope.go:117] "RemoveContainer" containerID="ef46ca768497249c4c9dc67c8605e65e632157e77df8ba6f4ae403be25933569" Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.315325 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ef46ca768497249c4c9dc67c8605e65e632157e77df8ba6f4ae403be25933569"} err="failed to get container status \"ef46ca768497249c4c9dc67c8605e65e632157e77df8ba6f4ae403be25933569\": rpc error: code = NotFound desc = could not find container \"ef46ca768497249c4c9dc67c8605e65e632157e77df8ba6f4ae403be25933569\": container with ID starting with ef46ca768497249c4c9dc67c8605e65e632157e77df8ba6f4ae403be25933569 not found: ID does not exist" Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.315350 4923 scope.go:117] "RemoveContainer" containerID="861ad6e47e57496510b5fc350abc6ed8089d27d09627189345c10d3d875ef3ce" Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.317119 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"861ad6e47e57496510b5fc350abc6ed8089d27d09627189345c10d3d875ef3ce"} err="failed to get container status \"861ad6e47e57496510b5fc350abc6ed8089d27d09627189345c10d3d875ef3ce\": rpc error: code = NotFound desc = could not find container \"861ad6e47e57496510b5fc350abc6ed8089d27d09627189345c10d3d875ef3ce\": container with ID starting with 861ad6e47e57496510b5fc350abc6ed8089d27d09627189345c10d3d875ef3ce not found: ID does not exist" Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.317165 4923 scope.go:117] "RemoveContainer" containerID="2c6af17f3a0b4920648006f5dafab8a6c719ec07a8ec9a90734a36eafdba2dfc" Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.317513 4923 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2c6af17f3a0b4920648006f5dafab8a6c719ec07a8ec9a90734a36eafdba2dfc"} err="failed to get container status \"2c6af17f3a0b4920648006f5dafab8a6c719ec07a8ec9a90734a36eafdba2dfc\": rpc error: code = NotFound desc = could not find container \"2c6af17f3a0b4920648006f5dafab8a6c719ec07a8ec9a90734a36eafdba2dfc\": container with ID starting with 2c6af17f3a0b4920648006f5dafab8a6c719ec07a8ec9a90734a36eafdba2dfc not found: ID does not exist" Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.317534 4923 scope.go:117] "RemoveContainer" containerID="7d19ba288e4dd082e7a5f8d1fbc6ccc3b978a661c281c8e6563aa643ec662cb3" Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.317792 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7d19ba288e4dd082e7a5f8d1fbc6ccc3b978a661c281c8e6563aa643ec662cb3"} err="failed to get container status \"7d19ba288e4dd082e7a5f8d1fbc6ccc3b978a661c281c8e6563aa643ec662cb3\": rpc error: code = NotFound desc = could not find container \"7d19ba288e4dd082e7a5f8d1fbc6ccc3b978a661c281c8e6563aa643ec662cb3\": container with ID starting with 7d19ba288e4dd082e7a5f8d1fbc6ccc3b978a661c281c8e6563aa643ec662cb3 not found: ID does not exist" Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.317812 4923 scope.go:117] "RemoveContainer" containerID="ef46ca768497249c4c9dc67c8605e65e632157e77df8ba6f4ae403be25933569" Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.318081 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ef46ca768497249c4c9dc67c8605e65e632157e77df8ba6f4ae403be25933569"} err="failed to get container status \"ef46ca768497249c4c9dc67c8605e65e632157e77df8ba6f4ae403be25933569\": rpc error: code = NotFound desc = could not find container \"ef46ca768497249c4c9dc67c8605e65e632157e77df8ba6f4ae403be25933569\": container with ID starting with ef46ca768497249c4c9dc67c8605e65e632157e77df8ba6f4ae403be25933569 not found: ID does not exist" Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.318101 4923 scope.go:117] "RemoveContainer" containerID="861ad6e47e57496510b5fc350abc6ed8089d27d09627189345c10d3d875ef3ce" Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.318509 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"861ad6e47e57496510b5fc350abc6ed8089d27d09627189345c10d3d875ef3ce"} err="failed to get container status \"861ad6e47e57496510b5fc350abc6ed8089d27d09627189345c10d3d875ef3ce\": rpc error: code = NotFound desc = could not find container \"861ad6e47e57496510b5fc350abc6ed8089d27d09627189345c10d3d875ef3ce\": container with ID starting with 861ad6e47e57496510b5fc350abc6ed8089d27d09627189345c10d3d875ef3ce not found: ID does not exist" Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.318530 4923 scope.go:117] "RemoveContainer" containerID="2c6af17f3a0b4920648006f5dafab8a6c719ec07a8ec9a90734a36eafdba2dfc" Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.319394 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2c6af17f3a0b4920648006f5dafab8a6c719ec07a8ec9a90734a36eafdba2dfc"} err="failed to get container status \"2c6af17f3a0b4920648006f5dafab8a6c719ec07a8ec9a90734a36eafdba2dfc\": rpc error: code = NotFound desc = could not find container \"2c6af17f3a0b4920648006f5dafab8a6c719ec07a8ec9a90734a36eafdba2dfc\": container with ID starting with 
2c6af17f3a0b4920648006f5dafab8a6c719ec07a8ec9a90734a36eafdba2dfc not found: ID does not exist" Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.319418 4923 scope.go:117] "RemoveContainer" containerID="7d19ba288e4dd082e7a5f8d1fbc6ccc3b978a661c281c8e6563aa643ec662cb3" Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.319786 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7d19ba288e4dd082e7a5f8d1fbc6ccc3b978a661c281c8e6563aa643ec662cb3"} err="failed to get container status \"7d19ba288e4dd082e7a5f8d1fbc6ccc3b978a661c281c8e6563aa643ec662cb3\": rpc error: code = NotFound desc = could not find container \"7d19ba288e4dd082e7a5f8d1fbc6ccc3b978a661c281c8e6563aa643ec662cb3\": container with ID starting with 7d19ba288e4dd082e7a5f8d1fbc6ccc3b978a661c281c8e6563aa643ec662cb3 not found: ID does not exist" Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.319815 4923 scope.go:117] "RemoveContainer" containerID="ef46ca768497249c4c9dc67c8605e65e632157e77df8ba6f4ae403be25933569" Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.320144 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ef46ca768497249c4c9dc67c8605e65e632157e77df8ba6f4ae403be25933569"} err="failed to get container status \"ef46ca768497249c4c9dc67c8605e65e632157e77df8ba6f4ae403be25933569\": rpc error: code = NotFound desc = could not find container \"ef46ca768497249c4c9dc67c8605e65e632157e77df8ba6f4ae403be25933569\": container with ID starting with ef46ca768497249c4c9dc67c8605e65e632157e77df8ba6f4ae403be25933569 not found: ID does not exist" Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.397021 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c1229884-4226-423a-b37f-4b1ee6f24044-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c1229884-4226-423a-b37f-4b1ee6f24044\") " pod="openstack/ceilometer-0" Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.397068 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c1229884-4226-423a-b37f-4b1ee6f24044-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c1229884-4226-423a-b37f-4b1ee6f24044\") " pod="openstack/ceilometer-0" Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.397112 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c1229884-4226-423a-b37f-4b1ee6f24044-run-httpd\") pod \"ceilometer-0\" (UID: \"c1229884-4226-423a-b37f-4b1ee6f24044\") " pod="openstack/ceilometer-0" Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.397133 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c1229884-4226-423a-b37f-4b1ee6f24044-log-httpd\") pod \"ceilometer-0\" (UID: \"c1229884-4226-423a-b37f-4b1ee6f24044\") " pod="openstack/ceilometer-0" Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.397218 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c1229884-4226-423a-b37f-4b1ee6f24044-scripts\") pod \"ceilometer-0\" (UID: \"c1229884-4226-423a-b37f-4b1ee6f24044\") " pod="openstack/ceilometer-0"
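The alternating "RemoveContainer" / "DeleteContainer returned error ... code = NotFound" lines above are not a real failure: by the time each retry reaches CRI-O, the container is already gone, and a NotFound on deletion can be treated as success. A minimal Go sketch of that idempotent-removal pattern (the remover interface and helper names are illustrative, not kubelet's actual types):

    package main

    import (
        "context"
        "fmt"

        "google.golang.org/grpc/codes"
        "google.golang.org/grpc/status"
    )

    // remover stands in for the CRI runtime client's delete call.
    type remover interface {
        RemoveContainer(ctx context.Context, id string) error
    }

    // removeIfPresent swallows gRPC NotFound so that repeated deletion
    // attempts, like the retries logged above, converge instead of failing.
    func removeIfPresent(ctx context.Context, r remover, id string) error {
        if err := r.RemoveContainer(ctx, id); err != nil && status.Code(err) != codes.NotFound {
            return fmt.Errorf("remove container %s: %w", id, err)
        }
        return nil
    }

    // gone is a fake runtime that reports every container as already removed.
    type gone struct{}

    func (gone) RemoveContainer(_ context.Context, id string) error {
        return status.Error(codes.NotFound, "could not find container "+id)
    }

    func main() {
        err := removeIfPresent(context.Background(), gone{}, "861ad6e47e57")
        fmt.Println(err) // <nil>: NotFound treated as success
    }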
Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.397245 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-djs9k\" (UniqueName: \"kubernetes.io/projected/c1229884-4226-423a-b37f-4b1ee6f24044-kube-api-access-djs9k\") pod \"ceilometer-0\" (UID: \"c1229884-4226-423a-b37f-4b1ee6f24044\") " pod="openstack/ceilometer-0" Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.397272 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c1229884-4226-423a-b37f-4b1ee6f24044-config-data\") pod \"ceilometer-0\" (UID: \"c1229884-4226-423a-b37f-4b1ee6f24044\") " pod="openstack/ceilometer-0" Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.499166 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c1229884-4226-423a-b37f-4b1ee6f24044-config-data\") pod \"ceilometer-0\" (UID: \"c1229884-4226-423a-b37f-4b1ee6f24044\") " pod="openstack/ceilometer-0" Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.499266 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c1229884-4226-423a-b37f-4b1ee6f24044-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c1229884-4226-423a-b37f-4b1ee6f24044\") " pod="openstack/ceilometer-0" Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.499309 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c1229884-4226-423a-b37f-4b1ee6f24044-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c1229884-4226-423a-b37f-4b1ee6f24044\") " pod="openstack/ceilometer-0" Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.499391 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c1229884-4226-423a-b37f-4b1ee6f24044-run-httpd\") pod \"ceilometer-0\" (UID: \"c1229884-4226-423a-b37f-4b1ee6f24044\") " pod="openstack/ceilometer-0" Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.499597 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c1229884-4226-423a-b37f-4b1ee6f24044-log-httpd\") pod \"ceilometer-0\" (UID: \"c1229884-4226-423a-b37f-4b1ee6f24044\") " pod="openstack/ceilometer-0" Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.499680 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c1229884-4226-423a-b37f-4b1ee6f24044-scripts\") pod \"ceilometer-0\" (UID: \"c1229884-4226-423a-b37f-4b1ee6f24044\") " pod="openstack/ceilometer-0" Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.499714 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-djs9k\" (UniqueName: \"kubernetes.io/projected/c1229884-4226-423a-b37f-4b1ee6f24044-kube-api-access-djs9k\") pod \"ceilometer-0\" (UID: \"c1229884-4226-423a-b37f-4b1ee6f24044\") " pod="openstack/ceilometer-0" Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.500075 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c1229884-4226-423a-b37f-4b1ee6f24044-run-httpd\") pod \"ceilometer-0\" (UID: \"c1229884-4226-423a-b37f-4b1ee6f24044\") " pod="openstack/ceilometer-0" Nov 28 11:29:19 crc kubenswrapper[4923]: I1128
11:29:19.500187 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c1229884-4226-423a-b37f-4b1ee6f24044-log-httpd\") pod \"ceilometer-0\" (UID: \"c1229884-4226-423a-b37f-4b1ee6f24044\") " pod="openstack/ceilometer-0" Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.512631 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c1229884-4226-423a-b37f-4b1ee6f24044-scripts\") pod \"ceilometer-0\" (UID: \"c1229884-4226-423a-b37f-4b1ee6f24044\") " pod="openstack/ceilometer-0" Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.512927 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c1229884-4226-423a-b37f-4b1ee6f24044-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c1229884-4226-423a-b37f-4b1ee6f24044\") " pod="openstack/ceilometer-0" Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.513044 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c1229884-4226-423a-b37f-4b1ee6f24044-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c1229884-4226-423a-b37f-4b1ee6f24044\") " pod="openstack/ceilometer-0" Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.514899 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c1229884-4226-423a-b37f-4b1ee6f24044-config-data\") pod \"ceilometer-0\" (UID: \"c1229884-4226-423a-b37f-4b1ee6f24044\") " pod="openstack/ceilometer-0" Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.515355 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-djs9k\" (UniqueName: \"kubernetes.io/projected/c1229884-4226-423a-b37f-4b1ee6f24044-kube-api-access-djs9k\") pod \"ceilometer-0\" (UID: \"c1229884-4226-423a-b37f-4b1ee6f24044\") " pod="openstack/ceilometer-0" Nov 28 11:29:19 crc kubenswrapper[4923]: I1128 11:29:19.585394 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 11:29:20 crc kubenswrapper[4923]: I1128 11:29:20.016477 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 11:29:20 crc kubenswrapper[4923]: I1128 11:29:20.189441 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c1229884-4226-423a-b37f-4b1ee6f24044","Type":"ContainerStarted","Data":"8ade23a78d27fdd2e531464e4da09900fc7ad6ccc7577605f40c7b6347d9bb32"} Nov 28 11:29:21 crc kubenswrapper[4923]: E1128 11:29:21.150344 4923 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0577bf3a_49d8_4540_92f3_fa1703570c2d.slice/crio-conmon-1064fe83c605861b84c06036c63c49d98500fec804f45b7a55d9c8633a752af9.scope\": RecentStats: unable to find data in memory cache]" Nov 28 11:29:21 crc kubenswrapper[4923]: I1128 11:29:21.179019 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6609a58-f8a7-4e87-8f95-c144d662c613" path="/var/lib/kubelet/pods/b6609a58-f8a7-4e87-8f95-c144d662c613/volumes" Nov 28 11:29:21 crc kubenswrapper[4923]: I1128 11:29:21.203538 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c1229884-4226-423a-b37f-4b1ee6f24044","Type":"ContainerStarted","Data":"08ff6bf64e2f24657ef98da695188d51c1ab93e757a3a7fc87775d6f1fdb930b"} Nov 28 11:29:22 crc kubenswrapper[4923]: I1128 11:29:22.211222 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c1229884-4226-423a-b37f-4b1ee6f24044","Type":"ContainerStarted","Data":"86be352faaf76b0d36a8e5e3cb91998f16f70976be28638e8e7d703b7f56b62b"} Nov 28 11:29:23 crc kubenswrapper[4923]: I1128 11:29:23.221849 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c1229884-4226-423a-b37f-4b1ee6f24044","Type":"ContainerStarted","Data":"540b90e4651d81d5fdd253e3722984df38dba28aac06b8040f1fe62a9fdc03b1"} Nov 28 11:29:23 crc kubenswrapper[4923]: I1128 11:29:23.914212 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-6vmzl"] Nov 28 11:29:23 crc kubenswrapper[4923]: I1128 11:29:23.915605 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-6vmzl" Nov 28 11:29:23 crc kubenswrapper[4923]: I1128 11:29:23.917550 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 28 11:29:23 crc kubenswrapper[4923]: I1128 11:29:23.918868 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts" Nov 28 11:29:23 crc kubenswrapper[4923]: I1128 11:29:23.919223 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-f5wvd" Nov 28 11:29:23 crc kubenswrapper[4923]: I1128 11:29:23.925505 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-6vmzl"] Nov 28 11:29:24 crc kubenswrapper[4923]: I1128 11:29:24.037563 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n5pwg\" (UniqueName: \"kubernetes.io/projected/a1a099ed-ae12-465a-b12d-f0bb966dfc64-kube-api-access-n5pwg\") pod \"nova-cell0-conductor-db-sync-6vmzl\" (UID: \"a1a099ed-ae12-465a-b12d-f0bb966dfc64\") " pod="openstack/nova-cell0-conductor-db-sync-6vmzl" Nov 28 11:29:24 crc kubenswrapper[4923]: I1128 11:29:24.037858 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a1a099ed-ae12-465a-b12d-f0bb966dfc64-scripts\") pod \"nova-cell0-conductor-db-sync-6vmzl\" (UID: \"a1a099ed-ae12-465a-b12d-f0bb966dfc64\") " pod="openstack/nova-cell0-conductor-db-sync-6vmzl" Nov 28 11:29:24 crc kubenswrapper[4923]: I1128 11:29:24.037983 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a1a099ed-ae12-465a-b12d-f0bb966dfc64-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-6vmzl\" (UID: \"a1a099ed-ae12-465a-b12d-f0bb966dfc64\") " pod="openstack/nova-cell0-conductor-db-sync-6vmzl" Nov 28 11:29:24 crc kubenswrapper[4923]: I1128 11:29:24.038135 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a1a099ed-ae12-465a-b12d-f0bb966dfc64-config-data\") pod \"nova-cell0-conductor-db-sync-6vmzl\" (UID: \"a1a099ed-ae12-465a-b12d-f0bb966dfc64\") " pod="openstack/nova-cell0-conductor-db-sync-6vmzl" Nov 28 11:29:24 crc kubenswrapper[4923]: I1128 11:29:24.140046 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a1a099ed-ae12-465a-b12d-f0bb966dfc64-scripts\") pod \"nova-cell0-conductor-db-sync-6vmzl\" (UID: \"a1a099ed-ae12-465a-b12d-f0bb966dfc64\") " pod="openstack/nova-cell0-conductor-db-sync-6vmzl" Nov 28 11:29:24 crc kubenswrapper[4923]: I1128 11:29:24.140177 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a1a099ed-ae12-465a-b12d-f0bb966dfc64-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-6vmzl\" (UID: \"a1a099ed-ae12-465a-b12d-f0bb966dfc64\") " pod="openstack/nova-cell0-conductor-db-sync-6vmzl" Nov 28 11:29:24 crc kubenswrapper[4923]: I1128 11:29:24.140238 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a1a099ed-ae12-465a-b12d-f0bb966dfc64-config-data\") pod \"nova-cell0-conductor-db-sync-6vmzl\" (UID: 
\"a1a099ed-ae12-465a-b12d-f0bb966dfc64\") " pod="openstack/nova-cell0-conductor-db-sync-6vmzl" Nov 28 11:29:24 crc kubenswrapper[4923]: I1128 11:29:24.140277 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n5pwg\" (UniqueName: \"kubernetes.io/projected/a1a099ed-ae12-465a-b12d-f0bb966dfc64-kube-api-access-n5pwg\") pod \"nova-cell0-conductor-db-sync-6vmzl\" (UID: \"a1a099ed-ae12-465a-b12d-f0bb966dfc64\") " pod="openstack/nova-cell0-conductor-db-sync-6vmzl" Nov 28 11:29:24 crc kubenswrapper[4923]: I1128 11:29:24.146428 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a1a099ed-ae12-465a-b12d-f0bb966dfc64-scripts\") pod \"nova-cell0-conductor-db-sync-6vmzl\" (UID: \"a1a099ed-ae12-465a-b12d-f0bb966dfc64\") " pod="openstack/nova-cell0-conductor-db-sync-6vmzl" Nov 28 11:29:24 crc kubenswrapper[4923]: I1128 11:29:24.152492 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a1a099ed-ae12-465a-b12d-f0bb966dfc64-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-6vmzl\" (UID: \"a1a099ed-ae12-465a-b12d-f0bb966dfc64\") " pod="openstack/nova-cell0-conductor-db-sync-6vmzl" Nov 28 11:29:24 crc kubenswrapper[4923]: I1128 11:29:24.153009 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a1a099ed-ae12-465a-b12d-f0bb966dfc64-config-data\") pod \"nova-cell0-conductor-db-sync-6vmzl\" (UID: \"a1a099ed-ae12-465a-b12d-f0bb966dfc64\") " pod="openstack/nova-cell0-conductor-db-sync-6vmzl" Nov 28 11:29:24 crc kubenswrapper[4923]: I1128 11:29:24.163394 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n5pwg\" (UniqueName: \"kubernetes.io/projected/a1a099ed-ae12-465a-b12d-f0bb966dfc64-kube-api-access-n5pwg\") pod \"nova-cell0-conductor-db-sync-6vmzl\" (UID: \"a1a099ed-ae12-465a-b12d-f0bb966dfc64\") " pod="openstack/nova-cell0-conductor-db-sync-6vmzl" Nov 28 11:29:24 crc kubenswrapper[4923]: I1128 11:29:24.230315 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-6vmzl" Nov 28 11:29:24 crc kubenswrapper[4923]: I1128 11:29:24.705503 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-6vmzl"] Nov 28 11:29:24 crc kubenswrapper[4923]: W1128 11:29:24.713832 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda1a099ed_ae12_465a_b12d_f0bb966dfc64.slice/crio-225e4e28678c75cc6306f27bebc9e83eb682a41744c9ffb6884877519fdfb2e0 WatchSource:0}: Error finding container 225e4e28678c75cc6306f27bebc9e83eb682a41744c9ffb6884877519fdfb2e0: Status 404 returned error can't find the container with id 225e4e28678c75cc6306f27bebc9e83eb682a41744c9ffb6884877519fdfb2e0 Nov 28 11:29:25 crc kubenswrapper[4923]: I1128 11:29:25.243899 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-6vmzl" event={"ID":"a1a099ed-ae12-465a-b12d-f0bb966dfc64","Type":"ContainerStarted","Data":"225e4e28678c75cc6306f27bebc9e83eb682a41744c9ffb6884877519fdfb2e0"} Nov 28 11:29:25 crc kubenswrapper[4923]: I1128 11:29:25.248423 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c1229884-4226-423a-b37f-4b1ee6f24044","Type":"ContainerStarted","Data":"958a2ae2f75fb46838f5f077175af00dc5e904edc4ff3112bd19f46b219c166e"} Nov 28 11:29:25 crc kubenswrapper[4923]: I1128 11:29:25.248617 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 28 11:29:25 crc kubenswrapper[4923]: I1128 11:29:25.293138 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.912764345 podStartE2EDuration="6.293116351s" podCreationTimestamp="2025-11-28 11:29:19 +0000 UTC" firstStartedPulling="2025-11-28 11:29:20.024288072 +0000 UTC m=+1239.152972272" lastFinishedPulling="2025-11-28 11:29:24.404640068 +0000 UTC m=+1243.533324278" observedRunningTime="2025-11-28 11:29:25.287521465 +0000 UTC m=+1244.416205685" watchObservedRunningTime="2025-11-28 11:29:25.293116351 +0000 UTC m=+1244.421800571" Nov 28 11:29:31 crc kubenswrapper[4923]: E1128 11:29:31.401665 4923 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0577bf3a_49d8_4540_92f3_fa1703570c2d.slice/crio-conmon-1064fe83c605861b84c06036c63c49d98500fec804f45b7a55d9c8633a752af9.scope\": RecentStats: unable to find data in memory cache]" Nov 28 11:29:34 crc kubenswrapper[4923]: I1128 11:29:34.351897 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-6vmzl" event={"ID":"a1a099ed-ae12-465a-b12d-f0bb966dfc64","Type":"ContainerStarted","Data":"c0ef187ebe0dfae485de6607fa0a6262f6a611373d82f1b47fa891faebac413a"} Nov 28 11:29:34 crc kubenswrapper[4923]: I1128 11:29:34.385154 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-6vmzl" podStartSLOduration=2.054849871 podStartE2EDuration="11.385128172s" podCreationTimestamp="2025-11-28 11:29:23 +0000 UTC" firstStartedPulling="2025-11-28 11:29:24.717728803 +0000 UTC m=+1243.846413023" lastFinishedPulling="2025-11-28 11:29:34.048007084 +0000 UTC m=+1253.176691324" observedRunningTime="2025-11-28 11:29:34.371065288 +0000 UTC m=+1253.499749548" watchObservedRunningTime="2025-11-28 11:29:34.385128172 +0000 UTC m=+1253.513812422" 
Nov 28 11:29:44 crc kubenswrapper[4923]: I1128 11:29:44.478173 4923 generic.go:334] "Generic (PLEG): container finished" podID="a1a099ed-ae12-465a-b12d-f0bb966dfc64" containerID="c0ef187ebe0dfae485de6607fa0a6262f6a611373d82f1b47fa891faebac413a" exitCode=0 Nov 28 11:29:44 crc kubenswrapper[4923]: I1128 11:29:44.479339 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-6vmzl" event={"ID":"a1a099ed-ae12-465a-b12d-f0bb966dfc64","Type":"ContainerDied","Data":"c0ef187ebe0dfae485de6607fa0a6262f6a611373d82f1b47fa891faebac413a"} Nov 28 11:29:45 crc kubenswrapper[4923]: I1128 11:29:45.902453 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-6vmzl" Nov 28 11:29:46 crc kubenswrapper[4923]: I1128 11:29:46.013064 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n5pwg\" (UniqueName: \"kubernetes.io/projected/a1a099ed-ae12-465a-b12d-f0bb966dfc64-kube-api-access-n5pwg\") pod \"a1a099ed-ae12-465a-b12d-f0bb966dfc64\" (UID: \"a1a099ed-ae12-465a-b12d-f0bb966dfc64\") " Nov 28 11:29:46 crc kubenswrapper[4923]: I1128 11:29:46.013119 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a1a099ed-ae12-465a-b12d-f0bb966dfc64-config-data\") pod \"a1a099ed-ae12-465a-b12d-f0bb966dfc64\" (UID: \"a1a099ed-ae12-465a-b12d-f0bb966dfc64\") " Nov 28 11:29:46 crc kubenswrapper[4923]: I1128 11:29:46.013158 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a1a099ed-ae12-465a-b12d-f0bb966dfc64-combined-ca-bundle\") pod \"a1a099ed-ae12-465a-b12d-f0bb966dfc64\" (UID: \"a1a099ed-ae12-465a-b12d-f0bb966dfc64\") " Nov 28 11:29:46 crc kubenswrapper[4923]: I1128 11:29:46.013223 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a1a099ed-ae12-465a-b12d-f0bb966dfc64-scripts\") pod \"a1a099ed-ae12-465a-b12d-f0bb966dfc64\" (UID: \"a1a099ed-ae12-465a-b12d-f0bb966dfc64\") " Nov 28 11:29:46 crc kubenswrapper[4923]: I1128 11:29:46.025021 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a1a099ed-ae12-465a-b12d-f0bb966dfc64-scripts" (OuterVolumeSpecName: "scripts") pod "a1a099ed-ae12-465a-b12d-f0bb966dfc64" (UID: "a1a099ed-ae12-465a-b12d-f0bb966dfc64"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:29:46 crc kubenswrapper[4923]: I1128 11:29:46.031154 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a1a099ed-ae12-465a-b12d-f0bb966dfc64-kube-api-access-n5pwg" (OuterVolumeSpecName: "kube-api-access-n5pwg") pod "a1a099ed-ae12-465a-b12d-f0bb966dfc64" (UID: "a1a099ed-ae12-465a-b12d-f0bb966dfc64"). InnerVolumeSpecName "kube-api-access-n5pwg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:29:46 crc kubenswrapper[4923]: I1128 11:29:46.054365 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a1a099ed-ae12-465a-b12d-f0bb966dfc64-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a1a099ed-ae12-465a-b12d-f0bb966dfc64" (UID: "a1a099ed-ae12-465a-b12d-f0bb966dfc64"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:29:46 crc kubenswrapper[4923]: I1128 11:29:46.059199 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a1a099ed-ae12-465a-b12d-f0bb966dfc64-config-data" (OuterVolumeSpecName: "config-data") pod "a1a099ed-ae12-465a-b12d-f0bb966dfc64" (UID: "a1a099ed-ae12-465a-b12d-f0bb966dfc64"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:29:46 crc kubenswrapper[4923]: I1128 11:29:46.114566 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n5pwg\" (UniqueName: \"kubernetes.io/projected/a1a099ed-ae12-465a-b12d-f0bb966dfc64-kube-api-access-n5pwg\") on node \"crc\" DevicePath \"\"" Nov 28 11:29:46 crc kubenswrapper[4923]: I1128 11:29:46.114601 4923 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a1a099ed-ae12-465a-b12d-f0bb966dfc64-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 11:29:46 crc kubenswrapper[4923]: I1128 11:29:46.114615 4923 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a1a099ed-ae12-465a-b12d-f0bb966dfc64-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 11:29:46 crc kubenswrapper[4923]: I1128 11:29:46.114626 4923 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a1a099ed-ae12-465a-b12d-f0bb966dfc64-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 11:29:46 crc kubenswrapper[4923]: I1128 11:29:46.506052 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-6vmzl" event={"ID":"a1a099ed-ae12-465a-b12d-f0bb966dfc64","Type":"ContainerDied","Data":"225e4e28678c75cc6306f27bebc9e83eb682a41744c9ffb6884877519fdfb2e0"} Nov 28 11:29:46 crc kubenswrapper[4923]: I1128 11:29:46.506090 4923 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="225e4e28678c75cc6306f27bebc9e83eb682a41744c9ffb6884877519fdfb2e0" Nov 28 11:29:46 crc kubenswrapper[4923]: I1128 11:29:46.506126 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-6vmzl" Nov 28 11:29:46 crc kubenswrapper[4923]: I1128 11:29:46.640758 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 28 11:29:46 crc kubenswrapper[4923]: E1128 11:29:46.641401 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a1a099ed-ae12-465a-b12d-f0bb966dfc64" containerName="nova-cell0-conductor-db-sync" Nov 28 11:29:46 crc kubenswrapper[4923]: I1128 11:29:46.641495 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="a1a099ed-ae12-465a-b12d-f0bb966dfc64" containerName="nova-cell0-conductor-db-sync" Nov 28 11:29:46 crc kubenswrapper[4923]: I1128 11:29:46.641779 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="a1a099ed-ae12-465a-b12d-f0bb966dfc64" containerName="nova-cell0-conductor-db-sync" Nov 28 11:29:46 crc kubenswrapper[4923]: I1128 11:29:46.642639 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 28 11:29:46 crc kubenswrapper[4923]: I1128 11:29:46.645073 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 28 11:29:46 crc kubenswrapper[4923]: I1128 11:29:46.645300 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-f5wvd" Nov 28 11:29:46 crc kubenswrapper[4923]: I1128 11:29:46.660319 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 28 11:29:46 crc kubenswrapper[4923]: I1128 11:29:46.827413 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bfece337-1342-4f01-b441-23dc879cb54d-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"bfece337-1342-4f01-b441-23dc879cb54d\") " pod="openstack/nova-cell0-conductor-0" Nov 28 11:29:46 crc kubenswrapper[4923]: I1128 11:29:46.827745 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bfece337-1342-4f01-b441-23dc879cb54d-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"bfece337-1342-4f01-b441-23dc879cb54d\") " pod="openstack/nova-cell0-conductor-0" Nov 28 11:29:46 crc kubenswrapper[4923]: I1128 11:29:46.827856 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q2cgp\" (UniqueName: \"kubernetes.io/projected/bfece337-1342-4f01-b441-23dc879cb54d-kube-api-access-q2cgp\") pod \"nova-cell0-conductor-0\" (UID: \"bfece337-1342-4f01-b441-23dc879cb54d\") " pod="openstack/nova-cell0-conductor-0" Nov 28 11:29:46 crc kubenswrapper[4923]: I1128 11:29:46.930237 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bfece337-1342-4f01-b441-23dc879cb54d-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"bfece337-1342-4f01-b441-23dc879cb54d\") " pod="openstack/nova-cell0-conductor-0" Nov 28 11:29:46 crc kubenswrapper[4923]: I1128 11:29:46.930360 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bfece337-1342-4f01-b441-23dc879cb54d-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"bfece337-1342-4f01-b441-23dc879cb54d\") " pod="openstack/nova-cell0-conductor-0" Nov 28 11:29:46 crc kubenswrapper[4923]: I1128 11:29:46.930477 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q2cgp\" (UniqueName: \"kubernetes.io/projected/bfece337-1342-4f01-b441-23dc879cb54d-kube-api-access-q2cgp\") pod \"nova-cell0-conductor-0\" (UID: \"bfece337-1342-4f01-b441-23dc879cb54d\") " pod="openstack/nova-cell0-conductor-0" Nov 28 11:29:46 crc kubenswrapper[4923]: I1128 11:29:46.936166 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bfece337-1342-4f01-b441-23dc879cb54d-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"bfece337-1342-4f01-b441-23dc879cb54d\") " pod="openstack/nova-cell0-conductor-0" Nov 28 11:29:46 crc kubenswrapper[4923]: I1128 11:29:46.936645 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bfece337-1342-4f01-b441-23dc879cb54d-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" 
(UID: \"bfece337-1342-4f01-b441-23dc879cb54d\") " pod="openstack/nova-cell0-conductor-0" Nov 28 11:29:46 crc kubenswrapper[4923]: I1128 11:29:46.958403 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q2cgp\" (UniqueName: \"kubernetes.io/projected/bfece337-1342-4f01-b441-23dc879cb54d-kube-api-access-q2cgp\") pod \"nova-cell0-conductor-0\" (UID: \"bfece337-1342-4f01-b441-23dc879cb54d\") " pod="openstack/nova-cell0-conductor-0" Nov 28 11:29:47 crc kubenswrapper[4923]: I1128 11:29:47.016121 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 28 11:29:47 crc kubenswrapper[4923]: I1128 11:29:47.450590 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 28 11:29:47 crc kubenswrapper[4923]: W1128 11:29:47.462365 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbfece337_1342_4f01_b441_23dc879cb54d.slice/crio-522442c717adf1ba37c7dfc316e501b2f830fe6d005625da1d2afdf5cc1af04d WatchSource:0}: Error finding container 522442c717adf1ba37c7dfc316e501b2f830fe6d005625da1d2afdf5cc1af04d: Status 404 returned error can't find the container with id 522442c717adf1ba37c7dfc316e501b2f830fe6d005625da1d2afdf5cc1af04d Nov 28 11:29:47 crc kubenswrapper[4923]: I1128 11:29:47.521757 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"bfece337-1342-4f01-b441-23dc879cb54d","Type":"ContainerStarted","Data":"522442c717adf1ba37c7dfc316e501b2f830fe6d005625da1d2afdf5cc1af04d"} Nov 28 11:29:48 crc kubenswrapper[4923]: I1128 11:29:48.535519 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"bfece337-1342-4f01-b441-23dc879cb54d","Type":"ContainerStarted","Data":"95cd3d5bd4086dcd1c0cc21df2cf099b0e41734b01a1b98fc562c16ea880889b"} Nov 28 11:29:48 crc kubenswrapper[4923]: I1128 11:29:48.535987 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Nov 28 11:29:48 crc kubenswrapper[4923]: I1128 11:29:48.570751 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.5707260549999997 podStartE2EDuration="2.570726055s" podCreationTimestamp="2025-11-28 11:29:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:29:48.565145219 +0000 UTC m=+1267.693829519" watchObservedRunningTime="2025-11-28 11:29:48.570726055 +0000 UTC m=+1267.699410295" Nov 28 11:29:49 crc kubenswrapper[4923]: I1128 11:29:49.593911 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 28 11:29:52 crc kubenswrapper[4923]: I1128 11:29:52.169045 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Nov 28 11:29:52 crc kubenswrapper[4923]: I1128 11:29:52.426518 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 11:29:52 crc kubenswrapper[4923]: I1128 11:29:52.426718 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="21f79010-cee5-4d8c-8af5-cab32d6b0031" containerName="kube-state-metrics" containerID="cri-o://1736bac7b866d033abb565e298ecff59892496d4ccf42e13ef69d0f1c0229351" 
gracePeriod=30 Nov 28 11:29:52 crc kubenswrapper[4923]: I1128 11:29:52.574281 4923 generic.go:334] "Generic (PLEG): container finished" podID="21f79010-cee5-4d8c-8af5-cab32d6b0031" containerID="1736bac7b866d033abb565e298ecff59892496d4ccf42e13ef69d0f1c0229351" exitCode=2 Nov 28 11:29:52 crc kubenswrapper[4923]: I1128 11:29:52.574363 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"21f79010-cee5-4d8c-8af5-cab32d6b0031","Type":"ContainerDied","Data":"1736bac7b866d033abb565e298ecff59892496d4ccf42e13ef69d0f1c0229351"} Nov 28 11:29:52 crc kubenswrapper[4923]: I1128 11:29:52.678092 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-8xtzd"] Nov 28 11:29:52 crc kubenswrapper[4923]: I1128 11:29:52.679194 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-8xtzd" Nov 28 11:29:52 crc kubenswrapper[4923]: I1128 11:29:52.683723 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts" Nov 28 11:29:52 crc kubenswrapper[4923]: I1128 11:29:52.687296 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data" Nov 28 11:29:52 crc kubenswrapper[4923]: I1128 11:29:52.698676 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-8xtzd"] Nov 28 11:29:52 crc kubenswrapper[4923]: I1128 11:29:52.850799 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 28 11:29:52 crc kubenswrapper[4923]: I1128 11:29:52.850827 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d26d3820-97ce-42bc-92c8-c48082942764-config-data\") pod \"nova-cell0-cell-mapping-8xtzd\" (UID: \"d26d3820-97ce-42bc-92c8-c48082942764\") " pod="openstack/nova-cell0-cell-mapping-8xtzd" Nov 28 11:29:52 crc kubenswrapper[4923]: I1128 11:29:52.850891 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f9429\" (UniqueName: \"kubernetes.io/projected/d26d3820-97ce-42bc-92c8-c48082942764-kube-api-access-f9429\") pod \"nova-cell0-cell-mapping-8xtzd\" (UID: \"d26d3820-97ce-42bc-92c8-c48082942764\") " pod="openstack/nova-cell0-cell-mapping-8xtzd" Nov 28 11:29:52 crc kubenswrapper[4923]: I1128 11:29:52.850993 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d26d3820-97ce-42bc-92c8-c48082942764-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-8xtzd\" (UID: \"d26d3820-97ce-42bc-92c8-c48082942764\") " pod="openstack/nova-cell0-cell-mapping-8xtzd" Nov 28 11:29:52 crc kubenswrapper[4923]: I1128 11:29:52.851014 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d26d3820-97ce-42bc-92c8-c48082942764-scripts\") pod \"nova-cell0-cell-mapping-8xtzd\" (UID: \"d26d3820-97ce-42bc-92c8-c48082942764\") " pod="openstack/nova-cell0-cell-mapping-8xtzd" Nov 28 11:29:52 crc kubenswrapper[4923]: I1128 11:29:52.852697 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 28 11:29:52 crc kubenswrapper[4923]: I1128 11:29:52.856534 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 28 11:29:52 crc kubenswrapper[4923]: I1128 11:29:52.874904 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 11:29:52 crc kubenswrapper[4923]: I1128 11:29:52.876030 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 11:29:52 crc kubenswrapper[4923]: I1128 11:29:52.881788 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 28 11:29:52 crc kubenswrapper[4923]: I1128 11:29:52.887632 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 11:29:52 crc kubenswrapper[4923]: I1128 11:29:52.915074 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 11:29:52 crc kubenswrapper[4923]: I1128 11:29:52.952049 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fb5f755a-29cb-453a-b61d-288cea6bba2e-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"fb5f755a-29cb-453a-b61d-288cea6bba2e\") " pod="openstack/nova-api-0" Nov 28 11:29:52 crc kubenswrapper[4923]: I1128 11:29:52.952302 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fb5f755a-29cb-453a-b61d-288cea6bba2e-logs\") pod \"nova-api-0\" (UID: \"fb5f755a-29cb-453a-b61d-288cea6bba2e\") " pod="openstack/nova-api-0" Nov 28 11:29:52 crc kubenswrapper[4923]: I1128 11:29:52.952451 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d26d3820-97ce-42bc-92c8-c48082942764-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-8xtzd\" (UID: \"d26d3820-97ce-42bc-92c8-c48082942764\") " pod="openstack/nova-cell0-cell-mapping-8xtzd" Nov 28 11:29:52 crc kubenswrapper[4923]: I1128 11:29:52.952542 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d26d3820-97ce-42bc-92c8-c48082942764-scripts\") pod \"nova-cell0-cell-mapping-8xtzd\" (UID: \"d26d3820-97ce-42bc-92c8-c48082942764\") " pod="openstack/nova-cell0-cell-mapping-8xtzd" Nov 28 11:29:52 crc kubenswrapper[4923]: I1128 11:29:52.952632 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kwt4z\" (UniqueName: \"kubernetes.io/projected/fb5f755a-29cb-453a-b61d-288cea6bba2e-kube-api-access-kwt4z\") pod \"nova-api-0\" (UID: \"fb5f755a-29cb-453a-b61d-288cea6bba2e\") " pod="openstack/nova-api-0" Nov 28 11:29:52 crc kubenswrapper[4923]: I1128 11:29:52.952748 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d26d3820-97ce-42bc-92c8-c48082942764-config-data\") pod \"nova-cell0-cell-mapping-8xtzd\" (UID: \"d26d3820-97ce-42bc-92c8-c48082942764\") " pod="openstack/nova-cell0-cell-mapping-8xtzd" Nov 28 11:29:52 crc kubenswrapper[4923]: I1128 11:29:52.952852 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f9429\" (UniqueName: \"kubernetes.io/projected/d26d3820-97ce-42bc-92c8-c48082942764-kube-api-access-f9429\") pod 
\"nova-cell0-cell-mapping-8xtzd\" (UID: \"d26d3820-97ce-42bc-92c8-c48082942764\") " pod="openstack/nova-cell0-cell-mapping-8xtzd" Nov 28 11:29:52 crc kubenswrapper[4923]: I1128 11:29:52.953009 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fb5f755a-29cb-453a-b61d-288cea6bba2e-config-data\") pod \"nova-api-0\" (UID: \"fb5f755a-29cb-453a-b61d-288cea6bba2e\") " pod="openstack/nova-api-0" Nov 28 11:29:52 crc kubenswrapper[4923]: I1128 11:29:52.985896 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d26d3820-97ce-42bc-92c8-c48082942764-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-8xtzd\" (UID: \"d26d3820-97ce-42bc-92c8-c48082942764\") " pod="openstack/nova-cell0-cell-mapping-8xtzd" Nov 28 11:29:52 crc kubenswrapper[4923]: I1128 11:29:52.986650 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d26d3820-97ce-42bc-92c8-c48082942764-scripts\") pod \"nova-cell0-cell-mapping-8xtzd\" (UID: \"d26d3820-97ce-42bc-92c8-c48082942764\") " pod="openstack/nova-cell0-cell-mapping-8xtzd" Nov 28 11:29:52 crc kubenswrapper[4923]: I1128 11:29:52.989559 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d26d3820-97ce-42bc-92c8-c48082942764-config-data\") pod \"nova-cell0-cell-mapping-8xtzd\" (UID: \"d26d3820-97ce-42bc-92c8-c48082942764\") " pod="openstack/nova-cell0-cell-mapping-8xtzd" Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.001563 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f9429\" (UniqueName: \"kubernetes.io/projected/d26d3820-97ce-42bc-92c8-c48082942764-kube-api-access-f9429\") pod \"nova-cell0-cell-mapping-8xtzd\" (UID: \"d26d3820-97ce-42bc-92c8-c48082942764\") " pod="openstack/nova-cell0-cell-mapping-8xtzd" Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.019419 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-8xtzd" Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.025418 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.026428 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.030294 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.041137 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.069059 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ddbc46aa-3e1f-4c6c-8549-73107fd2d5cc-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"ddbc46aa-3e1f-4c6c-8549-73107fd2d5cc\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.069122 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d5b84f3f-7303-45e3-8785-391209a4b7d7-config-data\") pod \"nova-scheduler-0\" (UID: \"d5b84f3f-7303-45e3-8785-391209a4b7d7\") " pod="openstack/nova-scheduler-0" Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.069148 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d5b84f3f-7303-45e3-8785-391209a4b7d7-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"d5b84f3f-7303-45e3-8785-391209a4b7d7\") " pod="openstack/nova-scheduler-0" Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.069168 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fb5f755a-29cb-453a-b61d-288cea6bba2e-config-data\") pod \"nova-api-0\" (UID: \"fb5f755a-29cb-453a-b61d-288cea6bba2e\") " pod="openstack/nova-api-0" Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.069190 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fb5f755a-29cb-453a-b61d-288cea6bba2e-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"fb5f755a-29cb-453a-b61d-288cea6bba2e\") " pod="openstack/nova-api-0" Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.069222 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ddbc46aa-3e1f-4c6c-8549-73107fd2d5cc-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"ddbc46aa-3e1f-4c6c-8549-73107fd2d5cc\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.069252 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fb5f755a-29cb-453a-b61d-288cea6bba2e-logs\") pod \"nova-api-0\" (UID: \"fb5f755a-29cb-453a-b61d-288cea6bba2e\") " pod="openstack/nova-api-0" Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.069299 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kwt4z\" (UniqueName: \"kubernetes.io/projected/fb5f755a-29cb-453a-b61d-288cea6bba2e-kube-api-access-kwt4z\") pod \"nova-api-0\" (UID: \"fb5f755a-29cb-453a-b61d-288cea6bba2e\") " pod="openstack/nova-api-0" Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.069314 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m2drt\" 
(UniqueName: \"kubernetes.io/projected/ddbc46aa-3e1f-4c6c-8549-73107fd2d5cc-kube-api-access-m2drt\") pod \"nova-cell1-novncproxy-0\" (UID: \"ddbc46aa-3e1f-4c6c-8549-73107fd2d5cc\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.069335 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-686mb\" (UniqueName: \"kubernetes.io/projected/d5b84f3f-7303-45e3-8785-391209a4b7d7-kube-api-access-686mb\") pod \"nova-scheduler-0\" (UID: \"d5b84f3f-7303-45e3-8785-391209a4b7d7\") " pod="openstack/nova-scheduler-0" Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.071414 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fb5f755a-29cb-453a-b61d-288cea6bba2e-logs\") pod \"nova-api-0\" (UID: \"fb5f755a-29cb-453a-b61d-288cea6bba2e\") " pod="openstack/nova-api-0" Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.080460 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fb5f755a-29cb-453a-b61d-288cea6bba2e-config-data\") pod \"nova-api-0\" (UID: \"fb5f755a-29cb-453a-b61d-288cea6bba2e\") " pod="openstack/nova-api-0" Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.116577 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fb5f755a-29cb-453a-b61d-288cea6bba2e-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"fb5f755a-29cb-453a-b61d-288cea6bba2e\") " pod="openstack/nova-api-0" Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.135293 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kwt4z\" (UniqueName: \"kubernetes.io/projected/fb5f755a-29cb-453a-b61d-288cea6bba2e-kube-api-access-kwt4z\") pod \"nova-api-0\" (UID: \"fb5f755a-29cb-453a-b61d-288cea6bba2e\") " pod="openstack/nova-api-0" Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.142002 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.153365 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.153537 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.158386 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.170511 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nwddd\" (UniqueName: \"kubernetes.io/projected/21f79010-cee5-4d8c-8af5-cab32d6b0031-kube-api-access-nwddd\") pod \"21f79010-cee5-4d8c-8af5-cab32d6b0031\" (UID: \"21f79010-cee5-4d8c-8af5-cab32d6b0031\") " Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.170736 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ddbc46aa-3e1f-4c6c-8549-73107fd2d5cc-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"ddbc46aa-3e1f-4c6c-8549-73107fd2d5cc\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.170793 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m2drt\" (UniqueName: \"kubernetes.io/projected/ddbc46aa-3e1f-4c6c-8549-73107fd2d5cc-kube-api-access-m2drt\") pod \"nova-cell1-novncproxy-0\" (UID: \"ddbc46aa-3e1f-4c6c-8549-73107fd2d5cc\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.170812 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bcbca10e-dedc-4268-a109-a980d385561d-config-data\") pod \"nova-metadata-0\" (UID: \"bcbca10e-dedc-4268-a109-a980d385561d\") " pod="openstack/nova-metadata-0" Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.170832 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bcbca10e-dedc-4268-a109-a980d385561d-logs\") pod \"nova-metadata-0\" (UID: \"bcbca10e-dedc-4268-a109-a980d385561d\") " pod="openstack/nova-metadata-0" Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.170848 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-686mb\" (UniqueName: \"kubernetes.io/projected/d5b84f3f-7303-45e3-8785-391209a4b7d7-kube-api-access-686mb\") pod \"nova-scheduler-0\" (UID: \"d5b84f3f-7303-45e3-8785-391209a4b7d7\") " pod="openstack/nova-scheduler-0" Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.170875 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ddbc46aa-3e1f-4c6c-8549-73107fd2d5cc-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"ddbc46aa-3e1f-4c6c-8549-73107fd2d5cc\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.170903 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w487s\" (UniqueName: \"kubernetes.io/projected/bcbca10e-dedc-4268-a109-a980d385561d-kube-api-access-w487s\") pod \"nova-metadata-0\" (UID: \"bcbca10e-dedc-4268-a109-a980d385561d\") " pod="openstack/nova-metadata-0" Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.170940 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d5b84f3f-7303-45e3-8785-391209a4b7d7-config-data\") pod \"nova-scheduler-0\" (UID: 
\"d5b84f3f-7303-45e3-8785-391209a4b7d7\") " pod="openstack/nova-scheduler-0" Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.170964 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d5b84f3f-7303-45e3-8785-391209a4b7d7-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"d5b84f3f-7303-45e3-8785-391209a4b7d7\") " pod="openstack/nova-scheduler-0" Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.170980 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bcbca10e-dedc-4268-a109-a980d385561d-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"bcbca10e-dedc-4268-a109-a980d385561d\") " pod="openstack/nova-metadata-0" Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.176378 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.177178 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d5b84f3f-7303-45e3-8785-391209a4b7d7-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"d5b84f3f-7303-45e3-8785-391209a4b7d7\") " pod="openstack/nova-scheduler-0" Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.178352 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ddbc46aa-3e1f-4c6c-8549-73107fd2d5cc-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"ddbc46aa-3e1f-4c6c-8549-73107fd2d5cc\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.180919 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ddbc46aa-3e1f-4c6c-8549-73107fd2d5cc-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"ddbc46aa-3e1f-4c6c-8549-73107fd2d5cc\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.182081 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d5b84f3f-7303-45e3-8785-391209a4b7d7-config-data\") pod \"nova-scheduler-0\" (UID: \"d5b84f3f-7303-45e3-8785-391209a4b7d7\") " pod="openstack/nova-scheduler-0" Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.201201 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-686mb\" (UniqueName: \"kubernetes.io/projected/d5b84f3f-7303-45e3-8785-391209a4b7d7-kube-api-access-686mb\") pod \"nova-scheduler-0\" (UID: \"d5b84f3f-7303-45e3-8785-391209a4b7d7\") " pod="openstack/nova-scheduler-0" Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.202404 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.202398 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.209154 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/21f79010-cee5-4d8c-8af5-cab32d6b0031-kube-api-access-nwddd" (OuterVolumeSpecName: "kube-api-access-nwddd") pod "21f79010-cee5-4d8c-8af5-cab32d6b0031" (UID: "21f79010-cee5-4d8c-8af5-cab32d6b0031"). InnerVolumeSpecName "kube-api-access-nwddd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.215073 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-566b5b7845-zr9q6"] Nov 28 11:29:53 crc kubenswrapper[4923]: E1128 11:29:53.215423 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="21f79010-cee5-4d8c-8af5-cab32d6b0031" containerName="kube-state-metrics" Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.215440 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="21f79010-cee5-4d8c-8af5-cab32d6b0031" containerName="kube-state-metrics" Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.215581 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="21f79010-cee5-4d8c-8af5-cab32d6b0031" containerName="kube-state-metrics" Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.216414 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-566b5b7845-zr9q6" Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.230157 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m2drt\" (UniqueName: \"kubernetes.io/projected/ddbc46aa-3e1f-4c6c-8549-73107fd2d5cc-kube-api-access-m2drt\") pod \"nova-cell1-novncproxy-0\" (UID: \"ddbc46aa-3e1f-4c6c-8549-73107fd2d5cc\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.274602 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/82518235-c0d1-44e4-b4c9-811888c5d245-dns-svc\") pod \"dnsmasq-dns-566b5b7845-zr9q6\" (UID: \"82518235-c0d1-44e4-b4c9-811888c5d245\") " pod="openstack/dnsmasq-dns-566b5b7845-zr9q6" Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.274874 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/82518235-c0d1-44e4-b4c9-811888c5d245-ovsdbserver-nb\") pod \"dnsmasq-dns-566b5b7845-zr9q6\" (UID: \"82518235-c0d1-44e4-b4c9-811888c5d245\") " pod="openstack/dnsmasq-dns-566b5b7845-zr9q6" Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.275012 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bcbca10e-dedc-4268-a109-a980d385561d-config-data\") pod \"nova-metadata-0\" (UID: \"bcbca10e-dedc-4268-a109-a980d385561d\") " pod="openstack/nova-metadata-0" Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.275107 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bcbca10e-dedc-4268-a109-a980d385561d-logs\") pod \"nova-metadata-0\" (UID: \"bcbca10e-dedc-4268-a109-a980d385561d\") " pod="openstack/nova-metadata-0" Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.275922 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bcbca10e-dedc-4268-a109-a980d385561d-logs\") pod \"nova-metadata-0\" (UID: \"bcbca10e-dedc-4268-a109-a980d385561d\") " pod="openstack/nova-metadata-0" Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.276142 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w487s\" (UniqueName: \"kubernetes.io/projected/bcbca10e-dedc-4268-a109-a980d385561d-kube-api-access-w487s\") pod \"nova-metadata-0\" (UID: 
\"bcbca10e-dedc-4268-a109-a980d385561d\") " pod="openstack/nova-metadata-0" Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.276281 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bcbca10e-dedc-4268-a109-a980d385561d-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"bcbca10e-dedc-4268-a109-a980d385561d\") " pod="openstack/nova-metadata-0" Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.276395 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/82518235-c0d1-44e4-b4c9-811888c5d245-config\") pod \"dnsmasq-dns-566b5b7845-zr9q6\" (UID: \"82518235-c0d1-44e4-b4c9-811888c5d245\") " pod="openstack/dnsmasq-dns-566b5b7845-zr9q6" Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.276484 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/82518235-c0d1-44e4-b4c9-811888c5d245-ovsdbserver-sb\") pod \"dnsmasq-dns-566b5b7845-zr9q6\" (UID: \"82518235-c0d1-44e4-b4c9-811888c5d245\") " pod="openstack/dnsmasq-dns-566b5b7845-zr9q6" Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.276583 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7kmr6\" (UniqueName: \"kubernetes.io/projected/82518235-c0d1-44e4-b4c9-811888c5d245-kube-api-access-7kmr6\") pod \"dnsmasq-dns-566b5b7845-zr9q6\" (UID: \"82518235-c0d1-44e4-b4c9-811888c5d245\") " pod="openstack/dnsmasq-dns-566b5b7845-zr9q6" Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.276812 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nwddd\" (UniqueName: \"kubernetes.io/projected/21f79010-cee5-4d8c-8af5-cab32d6b0031-kube-api-access-nwddd\") on node \"crc\" DevicePath \"\"" Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.287634 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bcbca10e-dedc-4268-a109-a980d385561d-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"bcbca10e-dedc-4268-a109-a980d385561d\") " pod="openstack/nova-metadata-0" Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.298264 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bcbca10e-dedc-4268-a109-a980d385561d-config-data\") pod \"nova-metadata-0\" (UID: \"bcbca10e-dedc-4268-a109-a980d385561d\") " pod="openstack/nova-metadata-0" Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.309253 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w487s\" (UniqueName: \"kubernetes.io/projected/bcbca10e-dedc-4268-a109-a980d385561d-kube-api-access-w487s\") pod \"nova-metadata-0\" (UID: \"bcbca10e-dedc-4268-a109-a980d385561d\") " pod="openstack/nova-metadata-0" Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.383134 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/82518235-c0d1-44e4-b4c9-811888c5d245-dns-svc\") pod \"dnsmasq-dns-566b5b7845-zr9q6\" (UID: \"82518235-c0d1-44e4-b4c9-811888c5d245\") " pod="openstack/dnsmasq-dns-566b5b7845-zr9q6" Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.383588 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/82518235-c0d1-44e4-b4c9-811888c5d245-ovsdbserver-nb\") pod \"dnsmasq-dns-566b5b7845-zr9q6\" (UID: \"82518235-c0d1-44e4-b4c9-811888c5d245\") " pod="openstack/dnsmasq-dns-566b5b7845-zr9q6" Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.383769 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/82518235-c0d1-44e4-b4c9-811888c5d245-config\") pod \"dnsmasq-dns-566b5b7845-zr9q6\" (UID: \"82518235-c0d1-44e4-b4c9-811888c5d245\") " pod="openstack/dnsmasq-dns-566b5b7845-zr9q6" Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.383868 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/82518235-c0d1-44e4-b4c9-811888c5d245-ovsdbserver-sb\") pod \"dnsmasq-dns-566b5b7845-zr9q6\" (UID: \"82518235-c0d1-44e4-b4c9-811888c5d245\") " pod="openstack/dnsmasq-dns-566b5b7845-zr9q6" Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.383974 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7kmr6\" (UniqueName: \"kubernetes.io/projected/82518235-c0d1-44e4-b4c9-811888c5d245-kube-api-access-7kmr6\") pod \"dnsmasq-dns-566b5b7845-zr9q6\" (UID: \"82518235-c0d1-44e4-b4c9-811888c5d245\") " pod="openstack/dnsmasq-dns-566b5b7845-zr9q6" Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.385298 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/82518235-c0d1-44e4-b4c9-811888c5d245-ovsdbserver-nb\") pod \"dnsmasq-dns-566b5b7845-zr9q6\" (UID: \"82518235-c0d1-44e4-b4c9-811888c5d245\") " pod="openstack/dnsmasq-dns-566b5b7845-zr9q6" Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.385816 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/82518235-c0d1-44e4-b4c9-811888c5d245-dns-svc\") pod \"dnsmasq-dns-566b5b7845-zr9q6\" (UID: \"82518235-c0d1-44e4-b4c9-811888c5d245\") " pod="openstack/dnsmasq-dns-566b5b7845-zr9q6" Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.387522 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/82518235-c0d1-44e4-b4c9-811888c5d245-config\") pod \"dnsmasq-dns-566b5b7845-zr9q6\" (UID: \"82518235-c0d1-44e4-b4c9-811888c5d245\") " pod="openstack/dnsmasq-dns-566b5b7845-zr9q6" Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.392354 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/82518235-c0d1-44e4-b4c9-811888c5d245-ovsdbserver-sb\") pod \"dnsmasq-dns-566b5b7845-zr9q6\" (UID: \"82518235-c0d1-44e4-b4c9-811888c5d245\") " pod="openstack/dnsmasq-dns-566b5b7845-zr9q6" Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.399047 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-566b5b7845-zr9q6"] Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.436252 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.437520 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7kmr6\" (UniqueName: \"kubernetes.io/projected/82518235-c0d1-44e4-b4c9-811888c5d245-kube-api-access-7kmr6\") pod \"dnsmasq-dns-566b5b7845-zr9q6\" (UID: \"82518235-c0d1-44e4-b4c9-811888c5d245\") " pod="openstack/dnsmasq-dns-566b5b7845-zr9q6" Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.506445 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.592243 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-566b5b7845-zr9q6" Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.626065 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"21f79010-cee5-4d8c-8af5-cab32d6b0031","Type":"ContainerDied","Data":"b4f8cbfe2299b472630863ab3fbe97b790841988c7e87c746d8542dab415192c"} Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.626117 4923 scope.go:117] "RemoveContainer" containerID="1736bac7b866d033abb565e298ecff59892496d4ccf42e13ef69d0f1c0229351" Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.626251 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.732660 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.741304 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.767073 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.770650 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.774647 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config" Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.775293 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc" Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.787457 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.804760 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-8xtzd"] Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.893991 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ntwvr\" (UniqueName: \"kubernetes.io/projected/f4883bd9-80ed-4500-806d-d1c2a04ebbbd-kube-api-access-ntwvr\") pod \"kube-state-metrics-0\" (UID: \"f4883bd9-80ed-4500-806d-d1c2a04ebbbd\") " pod="openstack/kube-state-metrics-0" Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.894054 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/f4883bd9-80ed-4500-806d-d1c2a04ebbbd-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"f4883bd9-80ed-4500-806d-d1c2a04ebbbd\") " pod="openstack/kube-state-metrics-0" Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.894095 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f4883bd9-80ed-4500-806d-d1c2a04ebbbd-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"f4883bd9-80ed-4500-806d-d1c2a04ebbbd\") " pod="openstack/kube-state-metrics-0" Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.894177 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/f4883bd9-80ed-4500-806d-d1c2a04ebbbd-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"f4883bd9-80ed-4500-806d-d1c2a04ebbbd\") " pod="openstack/kube-state-metrics-0" Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.966453 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 11:29:53 crc kubenswrapper[4923]: I1128 11:29:53.996713 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 11:29:54 crc kubenswrapper[4923]: I1128 11:29:54.000055 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f4883bd9-80ed-4500-806d-d1c2a04ebbbd-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"f4883bd9-80ed-4500-806d-d1c2a04ebbbd\") " pod="openstack/kube-state-metrics-0" Nov 28 11:29:54 crc kubenswrapper[4923]: I1128 11:29:54.000230 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/f4883bd9-80ed-4500-806d-d1c2a04ebbbd-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"f4883bd9-80ed-4500-806d-d1c2a04ebbbd\") " pod="openstack/kube-state-metrics-0" Nov 28 11:29:54 crc kubenswrapper[4923]: I1128 11:29:54.000303 4923 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-ntwvr\" (UniqueName: \"kubernetes.io/projected/f4883bd9-80ed-4500-806d-d1c2a04ebbbd-kube-api-access-ntwvr\") pod \"kube-state-metrics-0\" (UID: \"f4883bd9-80ed-4500-806d-d1c2a04ebbbd\") " pod="openstack/kube-state-metrics-0" Nov 28 11:29:54 crc kubenswrapper[4923]: I1128 11:29:54.000356 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/f4883bd9-80ed-4500-806d-d1c2a04ebbbd-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"f4883bd9-80ed-4500-806d-d1c2a04ebbbd\") " pod="openstack/kube-state-metrics-0" Nov 28 11:29:54 crc kubenswrapper[4923]: I1128 11:29:54.014600 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/f4883bd9-80ed-4500-806d-d1c2a04ebbbd-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"f4883bd9-80ed-4500-806d-d1c2a04ebbbd\") " pod="openstack/kube-state-metrics-0" Nov 28 11:29:54 crc kubenswrapper[4923]: I1128 11:29:54.015488 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/f4883bd9-80ed-4500-806d-d1c2a04ebbbd-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"f4883bd9-80ed-4500-806d-d1c2a04ebbbd\") " pod="openstack/kube-state-metrics-0" Nov 28 11:29:54 crc kubenswrapper[4923]: I1128 11:29:54.027879 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f4883bd9-80ed-4500-806d-d1c2a04ebbbd-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"f4883bd9-80ed-4500-806d-d1c2a04ebbbd\") " pod="openstack/kube-state-metrics-0" Nov 28 11:29:54 crc kubenswrapper[4923]: I1128 11:29:54.040035 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ntwvr\" (UniqueName: \"kubernetes.io/projected/f4883bd9-80ed-4500-806d-d1c2a04ebbbd-kube-api-access-ntwvr\") pod \"kube-state-metrics-0\" (UID: \"f4883bd9-80ed-4500-806d-d1c2a04ebbbd\") " pod="openstack/kube-state-metrics-0" Nov 28 11:29:54 crc kubenswrapper[4923]: I1128 11:29:54.150909 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 28 11:29:54 crc kubenswrapper[4923]: I1128 11:29:54.193780 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 11:29:54 crc kubenswrapper[4923]: W1128 11:29:54.210799 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podddbc46aa_3e1f_4c6c_8549_73107fd2d5cc.slice/crio-9322871af9967c3e75ae25a09085ec0dbd4911150a7b1b12733258a433455235 WatchSource:0}: Error finding container 9322871af9967c3e75ae25a09085ec0dbd4911150a7b1b12733258a433455235: Status 404 returned error can't find the container with id 9322871af9967c3e75ae25a09085ec0dbd4911150a7b1b12733258a433455235 Nov 28 11:29:54 crc kubenswrapper[4923]: I1128 11:29:54.280832 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 11:29:54 crc kubenswrapper[4923]: I1128 11:29:54.294108 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-566b5b7845-zr9q6"] Nov 28 11:29:54 crc kubenswrapper[4923]: I1128 11:29:54.402489 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-rmq5g"] Nov 28 11:29:54 crc kubenswrapper[4923]: I1128 11:29:54.404390 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-rmq5g" Nov 28 11:29:54 crc kubenswrapper[4923]: I1128 11:29:54.416155 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-rmq5g"] Nov 28 11:29:54 crc kubenswrapper[4923]: I1128 11:29:54.416809 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts" Nov 28 11:29:54 crc kubenswrapper[4923]: I1128 11:29:54.417615 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 28 11:29:54 crc kubenswrapper[4923]: I1128 11:29:54.522630 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3ad75bb0-7f36-4ac3-b0a7-402237601802-config-data\") pod \"nova-cell1-conductor-db-sync-rmq5g\" (UID: \"3ad75bb0-7f36-4ac3-b0a7-402237601802\") " pod="openstack/nova-cell1-conductor-db-sync-rmq5g" Nov 28 11:29:54 crc kubenswrapper[4923]: I1128 11:29:54.522690 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7hp5h\" (UniqueName: \"kubernetes.io/projected/3ad75bb0-7f36-4ac3-b0a7-402237601802-kube-api-access-7hp5h\") pod \"nova-cell1-conductor-db-sync-rmq5g\" (UID: \"3ad75bb0-7f36-4ac3-b0a7-402237601802\") " pod="openstack/nova-cell1-conductor-db-sync-rmq5g" Nov 28 11:29:54 crc kubenswrapper[4923]: I1128 11:29:54.522720 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3ad75bb0-7f36-4ac3-b0a7-402237601802-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-rmq5g\" (UID: \"3ad75bb0-7f36-4ac3-b0a7-402237601802\") " pod="openstack/nova-cell1-conductor-db-sync-rmq5g" Nov 28 11:29:54 crc kubenswrapper[4923]: I1128 11:29:54.522738 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3ad75bb0-7f36-4ac3-b0a7-402237601802-scripts\") pod \"nova-cell1-conductor-db-sync-rmq5g\" (UID: 
\"3ad75bb0-7f36-4ac3-b0a7-402237601802\") " pod="openstack/nova-cell1-conductor-db-sync-rmq5g" Nov 28 11:29:54 crc kubenswrapper[4923]: I1128 11:29:54.592110 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 11:29:54 crc kubenswrapper[4923]: I1128 11:29:54.592346 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c1229884-4226-423a-b37f-4b1ee6f24044" containerName="ceilometer-central-agent" containerID="cri-o://08ff6bf64e2f24657ef98da695188d51c1ab93e757a3a7fc87775d6f1fdb930b" gracePeriod=30 Nov 28 11:29:54 crc kubenswrapper[4923]: I1128 11:29:54.592401 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c1229884-4226-423a-b37f-4b1ee6f24044" containerName="proxy-httpd" containerID="cri-o://958a2ae2f75fb46838f5f077175af00dc5e904edc4ff3112bd19f46b219c166e" gracePeriod=30 Nov 28 11:29:54 crc kubenswrapper[4923]: I1128 11:29:54.592490 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c1229884-4226-423a-b37f-4b1ee6f24044" containerName="sg-core" containerID="cri-o://540b90e4651d81d5fdd253e3722984df38dba28aac06b8040f1fe62a9fdc03b1" gracePeriod=30 Nov 28 11:29:54 crc kubenswrapper[4923]: I1128 11:29:54.592564 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c1229884-4226-423a-b37f-4b1ee6f24044" containerName="ceilometer-notification-agent" containerID="cri-o://86be352faaf76b0d36a8e5e3cb91998f16f70976be28638e8e7d703b7f56b62b" gracePeriod=30 Nov 28 11:29:54 crc kubenswrapper[4923]: I1128 11:29:54.625255 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3ad75bb0-7f36-4ac3-b0a7-402237601802-config-data\") pod \"nova-cell1-conductor-db-sync-rmq5g\" (UID: \"3ad75bb0-7f36-4ac3-b0a7-402237601802\") " pod="openstack/nova-cell1-conductor-db-sync-rmq5g" Nov 28 11:29:54 crc kubenswrapper[4923]: I1128 11:29:54.625314 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7hp5h\" (UniqueName: \"kubernetes.io/projected/3ad75bb0-7f36-4ac3-b0a7-402237601802-kube-api-access-7hp5h\") pod \"nova-cell1-conductor-db-sync-rmq5g\" (UID: \"3ad75bb0-7f36-4ac3-b0a7-402237601802\") " pod="openstack/nova-cell1-conductor-db-sync-rmq5g" Nov 28 11:29:54 crc kubenswrapper[4923]: I1128 11:29:54.625345 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3ad75bb0-7f36-4ac3-b0a7-402237601802-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-rmq5g\" (UID: \"3ad75bb0-7f36-4ac3-b0a7-402237601802\") " pod="openstack/nova-cell1-conductor-db-sync-rmq5g" Nov 28 11:29:54 crc kubenswrapper[4923]: I1128 11:29:54.625361 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3ad75bb0-7f36-4ac3-b0a7-402237601802-scripts\") pod \"nova-cell1-conductor-db-sync-rmq5g\" (UID: \"3ad75bb0-7f36-4ac3-b0a7-402237601802\") " pod="openstack/nova-cell1-conductor-db-sync-rmq5g" Nov 28 11:29:54 crc kubenswrapper[4923]: I1128 11:29:54.631505 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3ad75bb0-7f36-4ac3-b0a7-402237601802-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-rmq5g\" (UID: 
\"3ad75bb0-7f36-4ac3-b0a7-402237601802\") " pod="openstack/nova-cell1-conductor-db-sync-rmq5g" Nov 28 11:29:54 crc kubenswrapper[4923]: I1128 11:29:54.634152 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3ad75bb0-7f36-4ac3-b0a7-402237601802-config-data\") pod \"nova-cell1-conductor-db-sync-rmq5g\" (UID: \"3ad75bb0-7f36-4ac3-b0a7-402237601802\") " pod="openstack/nova-cell1-conductor-db-sync-rmq5g" Nov 28 11:29:54 crc kubenswrapper[4923]: I1128 11:29:54.646462 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3ad75bb0-7f36-4ac3-b0a7-402237601802-scripts\") pod \"nova-cell1-conductor-db-sync-rmq5g\" (UID: \"3ad75bb0-7f36-4ac3-b0a7-402237601802\") " pod="openstack/nova-cell1-conductor-db-sync-rmq5g" Nov 28 11:29:54 crc kubenswrapper[4923]: I1128 11:29:54.653242 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7hp5h\" (UniqueName: \"kubernetes.io/projected/3ad75bb0-7f36-4ac3-b0a7-402237601802-kube-api-access-7hp5h\") pod \"nova-cell1-conductor-db-sync-rmq5g\" (UID: \"3ad75bb0-7f36-4ac3-b0a7-402237601802\") " pod="openstack/nova-cell1-conductor-db-sync-rmq5g" Nov 28 11:29:54 crc kubenswrapper[4923]: I1128 11:29:54.667460 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"d5b84f3f-7303-45e3-8785-391209a4b7d7","Type":"ContainerStarted","Data":"812c0473fc9f177cc0018c89540347e25239d05e5fb36076f453c2458e0c71c7"} Nov 28 11:29:54 crc kubenswrapper[4923]: I1128 11:29:54.681863 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-566b5b7845-zr9q6" event={"ID":"82518235-c0d1-44e4-b4c9-811888c5d245","Type":"ContainerStarted","Data":"13889e08b7cfb38a7844bae03d39123ebb3de645e328cd04528dbb09a10f0eb0"} Nov 28 11:29:54 crc kubenswrapper[4923]: I1128 11:29:54.681902 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-566b5b7845-zr9q6" event={"ID":"82518235-c0d1-44e4-b4c9-811888c5d245","Type":"ContainerStarted","Data":"60be3025fce828e7547e93c0d76b83cdf1bbea8e308ab992f96ca2e72eba9821"} Nov 28 11:29:54 crc kubenswrapper[4923]: I1128 11:29:54.697714 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-8xtzd" event={"ID":"d26d3820-97ce-42bc-92c8-c48082942764","Type":"ContainerStarted","Data":"b371cb093ed219d127de4b09c49ab301c61daae66c5c0159d8c5f84d9db85a7a"} Nov 28 11:29:54 crc kubenswrapper[4923]: I1128 11:29:54.697758 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-8xtzd" event={"ID":"d26d3820-97ce-42bc-92c8-c48082942764","Type":"ContainerStarted","Data":"13d868896b7e178a98d557b5f55ec5948e29255fe112a09d019415a589b9f790"} Nov 28 11:29:54 crc kubenswrapper[4923]: I1128 11:29:54.734329 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"fb5f755a-29cb-453a-b61d-288cea6bba2e","Type":"ContainerStarted","Data":"5bb85132126497c6f6e8ce12b0cda98ad59c895d3f5dd24f554ab444d7d8f5d4"} Nov 28 11:29:54 crc kubenswrapper[4923]: I1128 11:29:54.745621 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"bcbca10e-dedc-4268-a109-a980d385561d","Type":"ContainerStarted","Data":"e0ff1f5300ffc6b579b3d78d09cfa56105e74aa0ac7af68d3cf528ed8304fc0f"} Nov 28 11:29:54 crc kubenswrapper[4923]: I1128 11:29:54.747692 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/nova-cell1-novncproxy-0" event={"ID":"ddbc46aa-3e1f-4c6c-8549-73107fd2d5cc","Type":"ContainerStarted","Data":"9322871af9967c3e75ae25a09085ec0dbd4911150a7b1b12733258a433455235"} Nov 28 11:29:54 crc kubenswrapper[4923]: I1128 11:29:54.780269 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 11:29:54 crc kubenswrapper[4923]: I1128 11:29:54.780638 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-8xtzd" podStartSLOduration=2.78062963 podStartE2EDuration="2.78062963s" podCreationTimestamp="2025-11-28 11:29:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:29:54.733619864 +0000 UTC m=+1273.862304074" watchObservedRunningTime="2025-11-28 11:29:54.78062963 +0000 UTC m=+1273.909313840" Nov 28 11:29:54 crc kubenswrapper[4923]: I1128 11:29:54.784713 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-rmq5g" Nov 28 11:29:55 crc kubenswrapper[4923]: I1128 11:29:55.181246 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="21f79010-cee5-4d8c-8af5-cab32d6b0031" path="/var/lib/kubelet/pods/21f79010-cee5-4d8c-8af5-cab32d6b0031/volumes" Nov 28 11:29:55 crc kubenswrapper[4923]: I1128 11:29:55.360449 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-rmq5g"] Nov 28 11:29:55 crc kubenswrapper[4923]: W1128 11:29:55.376977 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3ad75bb0_7f36_4ac3_b0a7_402237601802.slice/crio-5701da96fa9d8dca59c533d8d0171fed2cd85fea4ec9991df03baacaaa1a998f WatchSource:0}: Error finding container 5701da96fa9d8dca59c533d8d0171fed2cd85fea4ec9991df03baacaaa1a998f: Status 404 returned error can't find the container with id 5701da96fa9d8dca59c533d8d0171fed2cd85fea4ec9991df03baacaaa1a998f Nov 28 11:29:55 crc kubenswrapper[4923]: I1128 11:29:55.760755 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-rmq5g" event={"ID":"3ad75bb0-7f36-4ac3-b0a7-402237601802","Type":"ContainerStarted","Data":"7e9207943b07a406fe8f65daf852a00fa82fa2e38b70dafe5af1e90dda7afd40"} Nov 28 11:29:55 crc kubenswrapper[4923]: I1128 11:29:55.760815 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-rmq5g" event={"ID":"3ad75bb0-7f36-4ac3-b0a7-402237601802","Type":"ContainerStarted","Data":"5701da96fa9d8dca59c533d8d0171fed2cd85fea4ec9991df03baacaaa1a998f"} Nov 28 11:29:55 crc kubenswrapper[4923]: I1128 11:29:55.771448 4923 generic.go:334] "Generic (PLEG): container finished" podID="c1229884-4226-423a-b37f-4b1ee6f24044" containerID="958a2ae2f75fb46838f5f077175af00dc5e904edc4ff3112bd19f46b219c166e" exitCode=0 Nov 28 11:29:55 crc kubenswrapper[4923]: I1128 11:29:55.771476 4923 generic.go:334] "Generic (PLEG): container finished" podID="c1229884-4226-423a-b37f-4b1ee6f24044" containerID="540b90e4651d81d5fdd253e3722984df38dba28aac06b8040f1fe62a9fdc03b1" exitCode=2 Nov 28 11:29:55 crc kubenswrapper[4923]: I1128 11:29:55.771486 4923 generic.go:334] "Generic (PLEG): container finished" podID="c1229884-4226-423a-b37f-4b1ee6f24044" containerID="08ff6bf64e2f24657ef98da695188d51c1ab93e757a3a7fc87775d6f1fdb930b" exitCode=0 Nov 28 11:29:55 crc kubenswrapper[4923]: I1128 11:29:55.772447 4923 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c1229884-4226-423a-b37f-4b1ee6f24044","Type":"ContainerDied","Data":"958a2ae2f75fb46838f5f077175af00dc5e904edc4ff3112bd19f46b219c166e"} Nov 28 11:29:55 crc kubenswrapper[4923]: I1128 11:29:55.772481 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c1229884-4226-423a-b37f-4b1ee6f24044","Type":"ContainerDied","Data":"540b90e4651d81d5fdd253e3722984df38dba28aac06b8040f1fe62a9fdc03b1"} Nov 28 11:29:55 crc kubenswrapper[4923]: I1128 11:29:55.772491 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c1229884-4226-423a-b37f-4b1ee6f24044","Type":"ContainerDied","Data":"08ff6bf64e2f24657ef98da695188d51c1ab93e757a3a7fc87775d6f1fdb930b"} Nov 28 11:29:55 crc kubenswrapper[4923]: I1128 11:29:55.779491 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-rmq5g" podStartSLOduration=1.779477473 podStartE2EDuration="1.779477473s" podCreationTimestamp="2025-11-28 11:29:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:29:55.775290526 +0000 UTC m=+1274.903974736" watchObservedRunningTime="2025-11-28 11:29:55.779477473 +0000 UTC m=+1274.908161683" Nov 28 11:29:55 crc kubenswrapper[4923]: I1128 11:29:55.782983 4923 generic.go:334] "Generic (PLEG): container finished" podID="82518235-c0d1-44e4-b4c9-811888c5d245" containerID="13889e08b7cfb38a7844bae03d39123ebb3de645e328cd04528dbb09a10f0eb0" exitCode=0 Nov 28 11:29:55 crc kubenswrapper[4923]: I1128 11:29:55.783035 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-566b5b7845-zr9q6" event={"ID":"82518235-c0d1-44e4-b4c9-811888c5d245","Type":"ContainerDied","Data":"13889e08b7cfb38a7844bae03d39123ebb3de645e328cd04528dbb09a10f0eb0"} Nov 28 11:29:55 crc kubenswrapper[4923]: I1128 11:29:55.783110 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-566b5b7845-zr9q6" event={"ID":"82518235-c0d1-44e4-b4c9-811888c5d245","Type":"ContainerStarted","Data":"34a1fdf8b8b78d9662d0775da5db012148144b78ea68b3bdbfdc243657f57f32"} Nov 28 11:29:55 crc kubenswrapper[4923]: I1128 11:29:55.783619 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-566b5b7845-zr9q6" Nov 28 11:29:55 crc kubenswrapper[4923]: I1128 11:29:55.790132 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"f4883bd9-80ed-4500-806d-d1c2a04ebbbd","Type":"ContainerStarted","Data":"f663e320ee47f738bb855233fce5d54a039ecd88680c3f81e0ae38a95d92e130"} Nov 28 11:29:55 crc kubenswrapper[4923]: I1128 11:29:55.790164 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"f4883bd9-80ed-4500-806d-d1c2a04ebbbd","Type":"ContainerStarted","Data":"f754de240c6d331a1c60c14a483b0255de0c749b8a7f4a6d4110fdc6be89f34a"} Nov 28 11:29:55 crc kubenswrapper[4923]: I1128 11:29:55.790588 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Nov 28 11:29:55 crc kubenswrapper[4923]: I1128 11:29:55.811865 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-566b5b7845-zr9q6" podStartSLOduration=2.811852829 podStartE2EDuration="2.811852829s" podCreationTimestamp="2025-11-28 11:29:53 +0000 UTC" 
firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:29:55.810139291 +0000 UTC m=+1274.938823491" watchObservedRunningTime="2025-11-28 11:29:55.811852829 +0000 UTC m=+1274.940537039" Nov 28 11:29:55 crc kubenswrapper[4923]: I1128 11:29:55.836444 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=2.428025344 podStartE2EDuration="2.836428547s" podCreationTimestamp="2025-11-28 11:29:53 +0000 UTC" firstStartedPulling="2025-11-28 11:29:54.751797233 +0000 UTC m=+1273.880481443" lastFinishedPulling="2025-11-28 11:29:55.160200436 +0000 UTC m=+1274.288884646" observedRunningTime="2025-11-28 11:29:55.834221176 +0000 UTC m=+1274.962905386" watchObservedRunningTime="2025-11-28 11:29:55.836428547 +0000 UTC m=+1274.965112757" Nov 28 11:29:56 crc kubenswrapper[4923]: I1128 11:29:56.947226 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 11:29:56 crc kubenswrapper[4923]: I1128 11:29:56.979563 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 11:29:57 crc kubenswrapper[4923]: I1128 11:29:57.829886 4923 generic.go:334] "Generic (PLEG): container finished" podID="c1229884-4226-423a-b37f-4b1ee6f24044" containerID="86be352faaf76b0d36a8e5e3cb91998f16f70976be28638e8e7d703b7f56b62b" exitCode=0 Nov 28 11:29:57 crc kubenswrapper[4923]: I1128 11:29:57.829981 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c1229884-4226-423a-b37f-4b1ee6f24044","Type":"ContainerDied","Data":"86be352faaf76b0d36a8e5e3cb91998f16f70976be28638e8e7d703b7f56b62b"} Nov 28 11:29:57 crc kubenswrapper[4923]: I1128 11:29:57.935565 4923 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/kube-state-metrics-0" podUID="21f79010-cee5-4d8c-8af5-cab32d6b0031" containerName="kube-state-metrics" probeResult="failure" output="Get \"http://10.217.0.103:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 28 11:29:58 crc kubenswrapper[4923]: I1128 11:29:58.171419 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 11:29:58 crc kubenswrapper[4923]: I1128 11:29:58.229923 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c1229884-4226-423a-b37f-4b1ee6f24044-sg-core-conf-yaml\") pod \"c1229884-4226-423a-b37f-4b1ee6f24044\" (UID: \"c1229884-4226-423a-b37f-4b1ee6f24044\") " Nov 28 11:29:58 crc kubenswrapper[4923]: I1128 11:29:58.230221 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c1229884-4226-423a-b37f-4b1ee6f24044-config-data\") pod \"c1229884-4226-423a-b37f-4b1ee6f24044\" (UID: \"c1229884-4226-423a-b37f-4b1ee6f24044\") " Nov 28 11:29:58 crc kubenswrapper[4923]: I1128 11:29:58.292063 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c1229884-4226-423a-b37f-4b1ee6f24044-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "c1229884-4226-423a-b37f-4b1ee6f24044" (UID: "c1229884-4226-423a-b37f-4b1ee6f24044"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:29:58 crc kubenswrapper[4923]: I1128 11:29:58.333485 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c1229884-4226-423a-b37f-4b1ee6f24044-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "c1229884-4226-423a-b37f-4b1ee6f24044" (UID: "c1229884-4226-423a-b37f-4b1ee6f24044"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:29:58 crc kubenswrapper[4923]: I1128 11:29:58.345009 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c1229884-4226-423a-b37f-4b1ee6f24044-run-httpd\") pod \"c1229884-4226-423a-b37f-4b1ee6f24044\" (UID: \"c1229884-4226-423a-b37f-4b1ee6f24044\") " Nov 28 11:29:58 crc kubenswrapper[4923]: I1128 11:29:58.345108 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-djs9k\" (UniqueName: \"kubernetes.io/projected/c1229884-4226-423a-b37f-4b1ee6f24044-kube-api-access-djs9k\") pod \"c1229884-4226-423a-b37f-4b1ee6f24044\" (UID: \"c1229884-4226-423a-b37f-4b1ee6f24044\") " Nov 28 11:29:58 crc kubenswrapper[4923]: I1128 11:29:58.345184 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c1229884-4226-423a-b37f-4b1ee6f24044-log-httpd\") pod \"c1229884-4226-423a-b37f-4b1ee6f24044\" (UID: \"c1229884-4226-423a-b37f-4b1ee6f24044\") " Nov 28 11:29:58 crc kubenswrapper[4923]: I1128 11:29:58.345230 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c1229884-4226-423a-b37f-4b1ee6f24044-scripts\") pod \"c1229884-4226-423a-b37f-4b1ee6f24044\" (UID: \"c1229884-4226-423a-b37f-4b1ee6f24044\") " Nov 28 11:29:58 crc kubenswrapper[4923]: I1128 11:29:58.345267 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c1229884-4226-423a-b37f-4b1ee6f24044-combined-ca-bundle\") pod \"c1229884-4226-423a-b37f-4b1ee6f24044\" (UID: \"c1229884-4226-423a-b37f-4b1ee6f24044\") " Nov 28 11:29:58 crc kubenswrapper[4923]: I1128 11:29:58.345543 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c1229884-4226-423a-b37f-4b1ee6f24044-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "c1229884-4226-423a-b37f-4b1ee6f24044" (UID: "c1229884-4226-423a-b37f-4b1ee6f24044"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:29:58 crc kubenswrapper[4923]: I1128 11:29:58.345888 4923 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c1229884-4226-423a-b37f-4b1ee6f24044-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 11:29:58 crc kubenswrapper[4923]: I1128 11:29:58.345900 4923 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c1229884-4226-423a-b37f-4b1ee6f24044-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 28 11:29:58 crc kubenswrapper[4923]: I1128 11:29:58.345910 4923 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c1229884-4226-423a-b37f-4b1ee6f24044-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 11:29:58 crc kubenswrapper[4923]: I1128 11:29:58.357052 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c1229884-4226-423a-b37f-4b1ee6f24044-scripts" (OuterVolumeSpecName: "scripts") pod "c1229884-4226-423a-b37f-4b1ee6f24044" (UID: "c1229884-4226-423a-b37f-4b1ee6f24044"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:29:58 crc kubenswrapper[4923]: I1128 11:29:58.363073 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c1229884-4226-423a-b37f-4b1ee6f24044-kube-api-access-djs9k" (OuterVolumeSpecName: "kube-api-access-djs9k") pod "c1229884-4226-423a-b37f-4b1ee6f24044" (UID: "c1229884-4226-423a-b37f-4b1ee6f24044"). InnerVolumeSpecName "kube-api-access-djs9k". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:29:58 crc kubenswrapper[4923]: I1128 11:29:58.433040 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c1229884-4226-423a-b37f-4b1ee6f24044-config-data" (OuterVolumeSpecName: "config-data") pod "c1229884-4226-423a-b37f-4b1ee6f24044" (UID: "c1229884-4226-423a-b37f-4b1ee6f24044"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:29:58 crc kubenswrapper[4923]: I1128 11:29:58.447603 4923 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c1229884-4226-423a-b37f-4b1ee6f24044-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 11:29:58 crc kubenswrapper[4923]: I1128 11:29:58.447633 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-djs9k\" (UniqueName: \"kubernetes.io/projected/c1229884-4226-423a-b37f-4b1ee6f24044-kube-api-access-djs9k\") on node \"crc\" DevicePath \"\"" Nov 28 11:29:58 crc kubenswrapper[4923]: I1128 11:29:58.447643 4923 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c1229884-4226-423a-b37f-4b1ee6f24044-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 11:29:58 crc kubenswrapper[4923]: I1128 11:29:58.477189 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c1229884-4226-423a-b37f-4b1ee6f24044-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c1229884-4226-423a-b37f-4b1ee6f24044" (UID: "c1229884-4226-423a-b37f-4b1ee6f24044"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:29:58 crc kubenswrapper[4923]: I1128 11:29:58.548729 4923 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c1229884-4226-423a-b37f-4b1ee6f24044-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 11:29:58 crc kubenswrapper[4923]: I1128 11:29:58.842995 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"d5b84f3f-7303-45e3-8785-391209a4b7d7","Type":"ContainerStarted","Data":"e7519e7865c0c1b3488e1fb36233b18ea9d2b2c3b11dd33cb4721b2cd319ade6"} Nov 28 11:29:58 crc kubenswrapper[4923]: I1128 11:29:58.845666 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"fb5f755a-29cb-453a-b61d-288cea6bba2e","Type":"ContainerStarted","Data":"778106caa588d36abba6cfc29648e8eb84aafbc46b21d47472b47df3a30a1a57"} Nov 28 11:29:58 crc kubenswrapper[4923]: I1128 11:29:58.845697 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"fb5f755a-29cb-453a-b61d-288cea6bba2e","Type":"ContainerStarted","Data":"923ca8eae6f7293f8ce238aacd5c1f3c26e1fd039b6d5b9a9b8f9803d297e562"} Nov 28 11:29:58 crc kubenswrapper[4923]: I1128 11:29:58.852235 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="bcbca10e-dedc-4268-a109-a980d385561d" containerName="nova-metadata-log" containerID="cri-o://d53d463028f51702f5ceb71067af334690aa1b98920015edc029cca71c6e100a" gracePeriod=30 Nov 28 11:29:58 crc kubenswrapper[4923]: I1128 11:29:58.852351 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="bcbca10e-dedc-4268-a109-a980d385561d" containerName="nova-metadata-metadata" containerID="cri-o://bea1c1465e6ed75e9ad3dc4f5bbdb4ae40aeffde07bb905c470ed3b258ee5099" gracePeriod=30 Nov 28 11:29:58 crc kubenswrapper[4923]: I1128 11:29:58.852561 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"bcbca10e-dedc-4268-a109-a980d385561d","Type":"ContainerStarted","Data":"bea1c1465e6ed75e9ad3dc4f5bbdb4ae40aeffde07bb905c470ed3b258ee5099"} Nov 28 11:29:58 crc kubenswrapper[4923]: I1128 11:29:58.852588 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"bcbca10e-dedc-4268-a109-a980d385561d","Type":"ContainerStarted","Data":"d53d463028f51702f5ceb71067af334690aa1b98920015edc029cca71c6e100a"} Nov 28 11:29:58 crc kubenswrapper[4923]: I1128 11:29:58.864504 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"ddbc46aa-3e1f-4c6c-8549-73107fd2d5cc","Type":"ContainerStarted","Data":"5a7cc3ed0766ba4eae9f769a1cb4911fc67cf4338ee6dbc2f0fcaabdbbcf893d"} Nov 28 11:29:58 crc kubenswrapper[4923]: I1128 11:29:58.864633 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="ddbc46aa-3e1f-4c6c-8549-73107fd2d5cc" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://5a7cc3ed0766ba4eae9f769a1cb4911fc67cf4338ee6dbc2f0fcaabdbbcf893d" gracePeriod=30 Nov 28 11:29:58 crc kubenswrapper[4923]: I1128 11:29:58.872482 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.956752781 podStartE2EDuration="6.872464421s" podCreationTimestamp="2025-11-28 11:29:52 +0000 UTC" firstStartedPulling="2025-11-28 
11:29:53.956582001 +0000 UTC m=+1273.085266211" lastFinishedPulling="2025-11-28 11:29:57.872293631 +0000 UTC m=+1277.000977851" observedRunningTime="2025-11-28 11:29:58.85888084 +0000 UTC m=+1277.987565050" watchObservedRunningTime="2025-11-28 11:29:58.872464421 +0000 UTC m=+1278.001148631" Nov 28 11:29:58 crc kubenswrapper[4923]: I1128 11:29:58.880973 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.310821442 podStartE2EDuration="5.880959388s" podCreationTimestamp="2025-11-28 11:29:53 +0000 UTC" firstStartedPulling="2025-11-28 11:29:54.301166997 +0000 UTC m=+1273.429851207" lastFinishedPulling="2025-11-28 11:29:57.871304933 +0000 UTC m=+1276.999989153" observedRunningTime="2025-11-28 11:29:58.879395045 +0000 UTC m=+1278.008079255" watchObservedRunningTime="2025-11-28 11:29:58.880959388 +0000 UTC m=+1278.009643588" Nov 28 11:29:58 crc kubenswrapper[4923]: I1128 11:29:58.887368 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c1229884-4226-423a-b37f-4b1ee6f24044","Type":"ContainerDied","Data":"8ade23a78d27fdd2e531464e4da09900fc7ad6ccc7577605f40c7b6347d9bb32"} Nov 28 11:29:58 crc kubenswrapper[4923]: I1128 11:29:58.887430 4923 scope.go:117] "RemoveContainer" containerID="958a2ae2f75fb46838f5f077175af00dc5e904edc4ff3112bd19f46b219c166e" Nov 28 11:29:58 crc kubenswrapper[4923]: I1128 11:29:58.887598 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 11:29:58 crc kubenswrapper[4923]: I1128 11:29:58.958757 4923 scope.go:117] "RemoveContainer" containerID="540b90e4651d81d5fdd253e3722984df38dba28aac06b8040f1fe62a9fdc03b1" Nov 28 11:29:58 crc kubenswrapper[4923]: I1128 11:29:58.959093 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=3.078480338 podStartE2EDuration="6.959079375s" podCreationTimestamp="2025-11-28 11:29:52 +0000 UTC" firstStartedPulling="2025-11-28 11:29:53.996530419 +0000 UTC m=+1273.125214629" lastFinishedPulling="2025-11-28 11:29:57.877129446 +0000 UTC m=+1277.005813666" observedRunningTime="2025-11-28 11:29:58.915519536 +0000 UTC m=+1278.044203746" watchObservedRunningTime="2025-11-28 11:29:58.959079375 +0000 UTC m=+1278.087763585" Nov 28 11:29:59 crc kubenswrapper[4923]: I1128 11:29:59.052668 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=3.419935608 podStartE2EDuration="7.052646985s" podCreationTimestamp="2025-11-28 11:29:52 +0000 UTC" firstStartedPulling="2025-11-28 11:29:54.216313332 +0000 UTC m=+1273.344997542" lastFinishedPulling="2025-11-28 11:29:57.849024709 +0000 UTC m=+1276.977708919" observedRunningTime="2025-11-28 11:29:58.948667544 +0000 UTC m=+1278.077351754" watchObservedRunningTime="2025-11-28 11:29:59.052646985 +0000 UTC m=+1278.181331195" Nov 28 11:29:59 crc kubenswrapper[4923]: I1128 11:29:59.059140 4923 scope.go:117] "RemoveContainer" containerID="86be352faaf76b0d36a8e5e3cb91998f16f70976be28638e8e7d703b7f56b62b" Nov 28 11:29:59 crc kubenswrapper[4923]: I1128 11:29:59.073548 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 11:29:59 crc kubenswrapper[4923]: I1128 11:29:59.080647 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 28 11:29:59 crc kubenswrapper[4923]: I1128 11:29:59.087982 4923 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openstack/ceilometer-0"] Nov 28 11:29:59 crc kubenswrapper[4923]: I1128 11:29:59.088028 4923 scope.go:117] "RemoveContainer" containerID="08ff6bf64e2f24657ef98da695188d51c1ab93e757a3a7fc87775d6f1fdb930b" Nov 28 11:29:59 crc kubenswrapper[4923]: E1128 11:29:59.088412 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c1229884-4226-423a-b37f-4b1ee6f24044" containerName="ceilometer-central-agent" Nov 28 11:29:59 crc kubenswrapper[4923]: I1128 11:29:59.088426 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="c1229884-4226-423a-b37f-4b1ee6f24044" containerName="ceilometer-central-agent" Nov 28 11:29:59 crc kubenswrapper[4923]: E1128 11:29:59.088436 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c1229884-4226-423a-b37f-4b1ee6f24044" containerName="ceilometer-notification-agent" Nov 28 11:29:59 crc kubenswrapper[4923]: I1128 11:29:59.088443 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="c1229884-4226-423a-b37f-4b1ee6f24044" containerName="ceilometer-notification-agent" Nov 28 11:29:59 crc kubenswrapper[4923]: E1128 11:29:59.088457 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c1229884-4226-423a-b37f-4b1ee6f24044" containerName="proxy-httpd" Nov 28 11:29:59 crc kubenswrapper[4923]: I1128 11:29:59.088463 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="c1229884-4226-423a-b37f-4b1ee6f24044" containerName="proxy-httpd" Nov 28 11:29:59 crc kubenswrapper[4923]: E1128 11:29:59.088489 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c1229884-4226-423a-b37f-4b1ee6f24044" containerName="sg-core" Nov 28 11:29:59 crc kubenswrapper[4923]: I1128 11:29:59.088495 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="c1229884-4226-423a-b37f-4b1ee6f24044" containerName="sg-core" Nov 28 11:29:59 crc kubenswrapper[4923]: I1128 11:29:59.088654 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="c1229884-4226-423a-b37f-4b1ee6f24044" containerName="proxy-httpd" Nov 28 11:29:59 crc kubenswrapper[4923]: I1128 11:29:59.088661 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="c1229884-4226-423a-b37f-4b1ee6f24044" containerName="ceilometer-central-agent" Nov 28 11:29:59 crc kubenswrapper[4923]: I1128 11:29:59.088674 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="c1229884-4226-423a-b37f-4b1ee6f24044" containerName="ceilometer-notification-agent" Nov 28 11:29:59 crc kubenswrapper[4923]: I1128 11:29:59.088692 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="c1229884-4226-423a-b37f-4b1ee6f24044" containerName="sg-core" Nov 28 11:29:59 crc kubenswrapper[4923]: I1128 11:29:59.090658 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 11:29:59 crc kubenswrapper[4923]: I1128 11:29:59.092349 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 28 11:29:59 crc kubenswrapper[4923]: I1128 11:29:59.096136 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 28 11:29:59 crc kubenswrapper[4923]: I1128 11:29:59.106796 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 28 11:29:59 crc kubenswrapper[4923]: I1128 11:29:59.108561 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 11:29:59 crc kubenswrapper[4923]: I1128 11:29:59.175918 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f1d9d17f-cc44-4cac-a8d3-b7217e4ace86-scripts\") pod \"ceilometer-0\" (UID: \"f1d9d17f-cc44-4cac-a8d3-b7217e4ace86\") " pod="openstack/ceilometer-0" Nov 28 11:29:59 crc kubenswrapper[4923]: I1128 11:29:59.176025 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f1d9d17f-cc44-4cac-a8d3-b7217e4ace86-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f1d9d17f-cc44-4cac-a8d3-b7217e4ace86\") " pod="openstack/ceilometer-0" Nov 28 11:29:59 crc kubenswrapper[4923]: I1128 11:29:59.176077 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f1d9d17f-cc44-4cac-a8d3-b7217e4ace86-run-httpd\") pod \"ceilometer-0\" (UID: \"f1d9d17f-cc44-4cac-a8d3-b7217e4ace86\") " pod="openstack/ceilometer-0" Nov 28 11:29:59 crc kubenswrapper[4923]: I1128 11:29:59.176348 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f1d9d17f-cc44-4cac-a8d3-b7217e4ace86-log-httpd\") pod \"ceilometer-0\" (UID: \"f1d9d17f-cc44-4cac-a8d3-b7217e4ace86\") " pod="openstack/ceilometer-0" Nov 28 11:29:59 crc kubenswrapper[4923]: I1128 11:29:59.176511 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/f1d9d17f-cc44-4cac-a8d3-b7217e4ace86-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"f1d9d17f-cc44-4cac-a8d3-b7217e4ace86\") " pod="openstack/ceilometer-0" Nov 28 11:29:59 crc kubenswrapper[4923]: I1128 11:29:59.176617 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pz8xh\" (UniqueName: \"kubernetes.io/projected/f1d9d17f-cc44-4cac-a8d3-b7217e4ace86-kube-api-access-pz8xh\") pod \"ceilometer-0\" (UID: \"f1d9d17f-cc44-4cac-a8d3-b7217e4ace86\") " pod="openstack/ceilometer-0" Nov 28 11:29:59 crc kubenswrapper[4923]: I1128 11:29:59.176664 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f1d9d17f-cc44-4cac-a8d3-b7217e4ace86-config-data\") pod \"ceilometer-0\" (UID: \"f1d9d17f-cc44-4cac-a8d3-b7217e4ace86\") " pod="openstack/ceilometer-0" Nov 28 11:29:59 crc kubenswrapper[4923]: I1128 11:29:59.176688 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/f1d9d17f-cc44-4cac-a8d3-b7217e4ace86-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f1d9d17f-cc44-4cac-a8d3-b7217e4ace86\") " pod="openstack/ceilometer-0" Nov 28 11:29:59 crc kubenswrapper[4923]: I1128 11:29:59.180946 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c1229884-4226-423a-b37f-4b1ee6f24044" path="/var/lib/kubelet/pods/c1229884-4226-423a-b37f-4b1ee6f24044/volumes" Nov 28 11:29:59 crc kubenswrapper[4923]: I1128 11:29:59.278098 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f1d9d17f-cc44-4cac-a8d3-b7217e4ace86-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f1d9d17f-cc44-4cac-a8d3-b7217e4ace86\") " pod="openstack/ceilometer-0" Nov 28 11:29:59 crc kubenswrapper[4923]: I1128 11:29:59.278146 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f1d9d17f-cc44-4cac-a8d3-b7217e4ace86-scripts\") pod \"ceilometer-0\" (UID: \"f1d9d17f-cc44-4cac-a8d3-b7217e4ace86\") " pod="openstack/ceilometer-0" Nov 28 11:29:59 crc kubenswrapper[4923]: I1128 11:29:59.278203 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f1d9d17f-cc44-4cac-a8d3-b7217e4ace86-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f1d9d17f-cc44-4cac-a8d3-b7217e4ace86\") " pod="openstack/ceilometer-0" Nov 28 11:29:59 crc kubenswrapper[4923]: I1128 11:29:59.278260 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f1d9d17f-cc44-4cac-a8d3-b7217e4ace86-run-httpd\") pod \"ceilometer-0\" (UID: \"f1d9d17f-cc44-4cac-a8d3-b7217e4ace86\") " pod="openstack/ceilometer-0" Nov 28 11:29:59 crc kubenswrapper[4923]: I1128 11:29:59.278709 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f1d9d17f-cc44-4cac-a8d3-b7217e4ace86-run-httpd\") pod \"ceilometer-0\" (UID: \"f1d9d17f-cc44-4cac-a8d3-b7217e4ace86\") " pod="openstack/ceilometer-0" Nov 28 11:29:59 crc kubenswrapper[4923]: I1128 11:29:59.279157 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f1d9d17f-cc44-4cac-a8d3-b7217e4ace86-log-httpd\") pod \"ceilometer-0\" (UID: \"f1d9d17f-cc44-4cac-a8d3-b7217e4ace86\") " pod="openstack/ceilometer-0" Nov 28 11:29:59 crc kubenswrapper[4923]: I1128 11:29:59.279268 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/f1d9d17f-cc44-4cac-a8d3-b7217e4ace86-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"f1d9d17f-cc44-4cac-a8d3-b7217e4ace86\") " pod="openstack/ceilometer-0" Nov 28 11:29:59 crc kubenswrapper[4923]: I1128 11:29:59.279410 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pz8xh\" (UniqueName: \"kubernetes.io/projected/f1d9d17f-cc44-4cac-a8d3-b7217e4ace86-kube-api-access-pz8xh\") pod \"ceilometer-0\" (UID: \"f1d9d17f-cc44-4cac-a8d3-b7217e4ace86\") " pod="openstack/ceilometer-0" Nov 28 11:29:59 crc kubenswrapper[4923]: I1128 11:29:59.279531 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f1d9d17f-cc44-4cac-a8d3-b7217e4ace86-config-data\") pod \"ceilometer-0\" (UID: 
\"f1d9d17f-cc44-4cac-a8d3-b7217e4ace86\") " pod="openstack/ceilometer-0" Nov 28 11:29:59 crc kubenswrapper[4923]: I1128 11:29:59.279650 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f1d9d17f-cc44-4cac-a8d3-b7217e4ace86-log-httpd\") pod \"ceilometer-0\" (UID: \"f1d9d17f-cc44-4cac-a8d3-b7217e4ace86\") " pod="openstack/ceilometer-0" Nov 28 11:29:59 crc kubenswrapper[4923]: I1128 11:29:59.287706 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f1d9d17f-cc44-4cac-a8d3-b7217e4ace86-config-data\") pod \"ceilometer-0\" (UID: \"f1d9d17f-cc44-4cac-a8d3-b7217e4ace86\") " pod="openstack/ceilometer-0" Nov 28 11:29:59 crc kubenswrapper[4923]: I1128 11:29:59.287799 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f1d9d17f-cc44-4cac-a8d3-b7217e4ace86-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f1d9d17f-cc44-4cac-a8d3-b7217e4ace86\") " pod="openstack/ceilometer-0" Nov 28 11:29:59 crc kubenswrapper[4923]: I1128 11:29:59.291824 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f1d9d17f-cc44-4cac-a8d3-b7217e4ace86-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f1d9d17f-cc44-4cac-a8d3-b7217e4ace86\") " pod="openstack/ceilometer-0" Nov 28 11:29:59 crc kubenswrapper[4923]: I1128 11:29:59.294859 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/f1d9d17f-cc44-4cac-a8d3-b7217e4ace86-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"f1d9d17f-cc44-4cac-a8d3-b7217e4ace86\") " pod="openstack/ceilometer-0" Nov 28 11:29:59 crc kubenswrapper[4923]: I1128 11:29:59.299325 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pz8xh\" (UniqueName: \"kubernetes.io/projected/f1d9d17f-cc44-4cac-a8d3-b7217e4ace86-kube-api-access-pz8xh\") pod \"ceilometer-0\" (UID: \"f1d9d17f-cc44-4cac-a8d3-b7217e4ace86\") " pod="openstack/ceilometer-0" Nov 28 11:29:59 crc kubenswrapper[4923]: I1128 11:29:59.299600 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f1d9d17f-cc44-4cac-a8d3-b7217e4ace86-scripts\") pod \"ceilometer-0\" (UID: \"f1d9d17f-cc44-4cac-a8d3-b7217e4ace86\") " pod="openstack/ceilometer-0" Nov 28 11:29:59 crc kubenswrapper[4923]: I1128 11:29:59.405461 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 11:29:59 crc kubenswrapper[4923]: I1128 11:29:59.687953 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 11:29:59 crc kubenswrapper[4923]: I1128 11:29:59.787078 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bcbca10e-dedc-4268-a109-a980d385561d-config-data\") pod \"bcbca10e-dedc-4268-a109-a980d385561d\" (UID: \"bcbca10e-dedc-4268-a109-a980d385561d\") " Nov 28 11:29:59 crc kubenswrapper[4923]: I1128 11:29:59.787193 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w487s\" (UniqueName: \"kubernetes.io/projected/bcbca10e-dedc-4268-a109-a980d385561d-kube-api-access-w487s\") pod \"bcbca10e-dedc-4268-a109-a980d385561d\" (UID: \"bcbca10e-dedc-4268-a109-a980d385561d\") " Nov 28 11:29:59 crc kubenswrapper[4923]: I1128 11:29:59.787216 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bcbca10e-dedc-4268-a109-a980d385561d-logs\") pod \"bcbca10e-dedc-4268-a109-a980d385561d\" (UID: \"bcbca10e-dedc-4268-a109-a980d385561d\") " Nov 28 11:29:59 crc kubenswrapper[4923]: I1128 11:29:59.787272 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bcbca10e-dedc-4268-a109-a980d385561d-combined-ca-bundle\") pod \"bcbca10e-dedc-4268-a109-a980d385561d\" (UID: \"bcbca10e-dedc-4268-a109-a980d385561d\") " Nov 28 11:29:59 crc kubenswrapper[4923]: I1128 11:29:59.788791 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bcbca10e-dedc-4268-a109-a980d385561d-logs" (OuterVolumeSpecName: "logs") pod "bcbca10e-dedc-4268-a109-a980d385561d" (UID: "bcbca10e-dedc-4268-a109-a980d385561d"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:29:59 crc kubenswrapper[4923]: I1128 11:29:59.800183 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bcbca10e-dedc-4268-a109-a980d385561d-kube-api-access-w487s" (OuterVolumeSpecName: "kube-api-access-w487s") pod "bcbca10e-dedc-4268-a109-a980d385561d" (UID: "bcbca10e-dedc-4268-a109-a980d385561d"). InnerVolumeSpecName "kube-api-access-w487s". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:29:59 crc kubenswrapper[4923]: I1128 11:29:59.818674 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bcbca10e-dedc-4268-a109-a980d385561d-config-data" (OuterVolumeSpecName: "config-data") pod "bcbca10e-dedc-4268-a109-a980d385561d" (UID: "bcbca10e-dedc-4268-a109-a980d385561d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:29:59 crc kubenswrapper[4923]: I1128 11:29:59.818840 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bcbca10e-dedc-4268-a109-a980d385561d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "bcbca10e-dedc-4268-a109-a980d385561d" (UID: "bcbca10e-dedc-4268-a109-a980d385561d"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:29:59 crc kubenswrapper[4923]: I1128 11:29:59.888873 4923 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bcbca10e-dedc-4268-a109-a980d385561d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 11:29:59 crc kubenswrapper[4923]: I1128 11:29:59.888911 4923 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bcbca10e-dedc-4268-a109-a980d385561d-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 11:29:59 crc kubenswrapper[4923]: I1128 11:29:59.888920 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w487s\" (UniqueName: \"kubernetes.io/projected/bcbca10e-dedc-4268-a109-a980d385561d-kube-api-access-w487s\") on node \"crc\" DevicePath \"\"" Nov 28 11:29:59 crc kubenswrapper[4923]: I1128 11:29:59.888942 4923 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bcbca10e-dedc-4268-a109-a980d385561d-logs\") on node \"crc\" DevicePath \"\"" Nov 28 11:29:59 crc kubenswrapper[4923]: I1128 11:29:59.896976 4923 generic.go:334] "Generic (PLEG): container finished" podID="bcbca10e-dedc-4268-a109-a980d385561d" containerID="bea1c1465e6ed75e9ad3dc4f5bbdb4ae40aeffde07bb905c470ed3b258ee5099" exitCode=0 Nov 28 11:29:59 crc kubenswrapper[4923]: I1128 11:29:59.897316 4923 generic.go:334] "Generic (PLEG): container finished" podID="bcbca10e-dedc-4268-a109-a980d385561d" containerID="d53d463028f51702f5ceb71067af334690aa1b98920015edc029cca71c6e100a" exitCode=143 Nov 28 11:29:59 crc kubenswrapper[4923]: I1128 11:29:59.897039 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 11:29:59 crc kubenswrapper[4923]: I1128 11:29:59.897076 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"bcbca10e-dedc-4268-a109-a980d385561d","Type":"ContainerDied","Data":"bea1c1465e6ed75e9ad3dc4f5bbdb4ae40aeffde07bb905c470ed3b258ee5099"} Nov 28 11:29:59 crc kubenswrapper[4923]: I1128 11:29:59.897383 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"bcbca10e-dedc-4268-a109-a980d385561d","Type":"ContainerDied","Data":"d53d463028f51702f5ceb71067af334690aa1b98920015edc029cca71c6e100a"} Nov 28 11:29:59 crc kubenswrapper[4923]: I1128 11:29:59.897398 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"bcbca10e-dedc-4268-a109-a980d385561d","Type":"ContainerDied","Data":"e0ff1f5300ffc6b579b3d78d09cfa56105e74aa0ac7af68d3cf528ed8304fc0f"} Nov 28 11:29:59 crc kubenswrapper[4923]: I1128 11:29:59.897419 4923 scope.go:117] "RemoveContainer" containerID="bea1c1465e6ed75e9ad3dc4f5bbdb4ae40aeffde07bb905c470ed3b258ee5099" Nov 28 11:29:59 crc kubenswrapper[4923]: I1128 11:29:59.928481 4923 scope.go:117] "RemoveContainer" containerID="d53d463028f51702f5ceb71067af334690aa1b98920015edc029cca71c6e100a" Nov 28 11:29:59 crc kubenswrapper[4923]: I1128 11:29:59.930407 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 11:29:59 crc kubenswrapper[4923]: I1128 11:29:59.952626 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 11:29:59 crc kubenswrapper[4923]: I1128 11:29:59.953704 4923 scope.go:117] "RemoveContainer" containerID="bea1c1465e6ed75e9ad3dc4f5bbdb4ae40aeffde07bb905c470ed3b258ee5099" Nov 28 
11:29:59 crc kubenswrapper[4923]: E1128 11:29:59.955994 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bea1c1465e6ed75e9ad3dc4f5bbdb4ae40aeffde07bb905c470ed3b258ee5099\": container with ID starting with bea1c1465e6ed75e9ad3dc4f5bbdb4ae40aeffde07bb905c470ed3b258ee5099 not found: ID does not exist" containerID="bea1c1465e6ed75e9ad3dc4f5bbdb4ae40aeffde07bb905c470ed3b258ee5099" Nov 28 11:29:59 crc kubenswrapper[4923]: I1128 11:29:59.956042 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bea1c1465e6ed75e9ad3dc4f5bbdb4ae40aeffde07bb905c470ed3b258ee5099"} err="failed to get container status \"bea1c1465e6ed75e9ad3dc4f5bbdb4ae40aeffde07bb905c470ed3b258ee5099\": rpc error: code = NotFound desc = could not find container \"bea1c1465e6ed75e9ad3dc4f5bbdb4ae40aeffde07bb905c470ed3b258ee5099\": container with ID starting with bea1c1465e6ed75e9ad3dc4f5bbdb4ae40aeffde07bb905c470ed3b258ee5099 not found: ID does not exist" Nov 28 11:29:59 crc kubenswrapper[4923]: I1128 11:29:59.956070 4923 scope.go:117] "RemoveContainer" containerID="d53d463028f51702f5ceb71067af334690aa1b98920015edc029cca71c6e100a" Nov 28 11:29:59 crc kubenswrapper[4923]: E1128 11:29:59.956780 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d53d463028f51702f5ceb71067af334690aa1b98920015edc029cca71c6e100a\": container with ID starting with d53d463028f51702f5ceb71067af334690aa1b98920015edc029cca71c6e100a not found: ID does not exist" containerID="d53d463028f51702f5ceb71067af334690aa1b98920015edc029cca71c6e100a" Nov 28 11:29:59 crc kubenswrapper[4923]: I1128 11:29:59.956808 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d53d463028f51702f5ceb71067af334690aa1b98920015edc029cca71c6e100a"} err="failed to get container status \"d53d463028f51702f5ceb71067af334690aa1b98920015edc029cca71c6e100a\": rpc error: code = NotFound desc = could not find container \"d53d463028f51702f5ceb71067af334690aa1b98920015edc029cca71c6e100a\": container with ID starting with d53d463028f51702f5ceb71067af334690aa1b98920015edc029cca71c6e100a not found: ID does not exist" Nov 28 11:29:59 crc kubenswrapper[4923]: I1128 11:29:59.956829 4923 scope.go:117] "RemoveContainer" containerID="bea1c1465e6ed75e9ad3dc4f5bbdb4ae40aeffde07bb905c470ed3b258ee5099" Nov 28 11:29:59 crc kubenswrapper[4923]: I1128 11:29:59.957010 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bea1c1465e6ed75e9ad3dc4f5bbdb4ae40aeffde07bb905c470ed3b258ee5099"} err="failed to get container status \"bea1c1465e6ed75e9ad3dc4f5bbdb4ae40aeffde07bb905c470ed3b258ee5099\": rpc error: code = NotFound desc = could not find container \"bea1c1465e6ed75e9ad3dc4f5bbdb4ae40aeffde07bb905c470ed3b258ee5099\": container with ID starting with bea1c1465e6ed75e9ad3dc4f5bbdb4ae40aeffde07bb905c470ed3b258ee5099 not found: ID does not exist" Nov 28 11:29:59 crc kubenswrapper[4923]: I1128 11:29:59.957028 4923 scope.go:117] "RemoveContainer" containerID="d53d463028f51702f5ceb71067af334690aa1b98920015edc029cca71c6e100a" Nov 28 11:29:59 crc kubenswrapper[4923]: I1128 11:29:59.957183 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d53d463028f51702f5ceb71067af334690aa1b98920015edc029cca71c6e100a"} err="failed to get container status 
\"d53d463028f51702f5ceb71067af334690aa1b98920015edc029cca71c6e100a\": rpc error: code = NotFound desc = could not find container \"d53d463028f51702f5ceb71067af334690aa1b98920015edc029cca71c6e100a\": container with ID starting with d53d463028f51702f5ceb71067af334690aa1b98920015edc029cca71c6e100a not found: ID does not exist" Nov 28 11:29:59 crc kubenswrapper[4923]: I1128 11:29:59.961342 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 28 11:29:59 crc kubenswrapper[4923]: E1128 11:29:59.961709 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bcbca10e-dedc-4268-a109-a980d385561d" containerName="nova-metadata-log" Nov 28 11:29:59 crc kubenswrapper[4923]: I1128 11:29:59.961726 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="bcbca10e-dedc-4268-a109-a980d385561d" containerName="nova-metadata-log" Nov 28 11:29:59 crc kubenswrapper[4923]: E1128 11:29:59.961766 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bcbca10e-dedc-4268-a109-a980d385561d" containerName="nova-metadata-metadata" Nov 28 11:29:59 crc kubenswrapper[4923]: I1128 11:29:59.961773 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="bcbca10e-dedc-4268-a109-a980d385561d" containerName="nova-metadata-metadata" Nov 28 11:29:59 crc kubenswrapper[4923]: I1128 11:29:59.961921 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="bcbca10e-dedc-4268-a109-a980d385561d" containerName="nova-metadata-log" Nov 28 11:29:59 crc kubenswrapper[4923]: I1128 11:29:59.961956 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="bcbca10e-dedc-4268-a109-a980d385561d" containerName="nova-metadata-metadata" Nov 28 11:29:59 crc kubenswrapper[4923]: I1128 11:29:59.962771 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 11:29:59 crc kubenswrapper[4923]: I1128 11:29:59.964826 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 28 11:29:59 crc kubenswrapper[4923]: I1128 11:29:59.967495 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 28 11:30:00 crc kubenswrapper[4923]: I1128 11:30:00.016918 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 11:30:00 crc kubenswrapper[4923]: I1128 11:30:00.036071 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 11:30:00 crc kubenswrapper[4923]: I1128 11:30:00.094961 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h59p2\" (UniqueName: \"kubernetes.io/projected/069215bd-5276-4e56-820b-aab3df39b394-kube-api-access-h59p2\") pod \"nova-metadata-0\" (UID: \"069215bd-5276-4e56-820b-aab3df39b394\") " pod="openstack/nova-metadata-0" Nov 28 11:30:00 crc kubenswrapper[4923]: I1128 11:30:00.095007 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/069215bd-5276-4e56-820b-aab3df39b394-config-data\") pod \"nova-metadata-0\" (UID: \"069215bd-5276-4e56-820b-aab3df39b394\") " pod="openstack/nova-metadata-0" Nov 28 11:30:00 crc kubenswrapper[4923]: I1128 11:30:00.095074 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/069215bd-5276-4e56-820b-aab3df39b394-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"069215bd-5276-4e56-820b-aab3df39b394\") " pod="openstack/nova-metadata-0" Nov 28 11:30:00 crc kubenswrapper[4923]: I1128 11:30:00.095183 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/069215bd-5276-4e56-820b-aab3df39b394-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"069215bd-5276-4e56-820b-aab3df39b394\") " pod="openstack/nova-metadata-0" Nov 28 11:30:00 crc kubenswrapper[4923]: I1128 11:30:00.095213 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/069215bd-5276-4e56-820b-aab3df39b394-logs\") pod \"nova-metadata-0\" (UID: \"069215bd-5276-4e56-820b-aab3df39b394\") " pod="openstack/nova-metadata-0" Nov 28 11:30:00 crc kubenswrapper[4923]: I1128 11:30:00.135751 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405490-pf6pk"] Nov 28 11:30:00 crc kubenswrapper[4923]: I1128 11:30:00.140071 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405490-pf6pk" Nov 28 11:30:00 crc kubenswrapper[4923]: I1128 11:30:00.142828 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 28 11:30:00 crc kubenswrapper[4923]: I1128 11:30:00.143230 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 28 11:30:00 crc kubenswrapper[4923]: I1128 11:30:00.146235 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405490-pf6pk"] Nov 28 11:30:00 crc kubenswrapper[4923]: I1128 11:30:00.196854 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/069215bd-5276-4e56-820b-aab3df39b394-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"069215bd-5276-4e56-820b-aab3df39b394\") " pod="openstack/nova-metadata-0" Nov 28 11:30:00 crc kubenswrapper[4923]: I1128 11:30:00.196923 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/069215bd-5276-4e56-820b-aab3df39b394-logs\") pod \"nova-metadata-0\" (UID: \"069215bd-5276-4e56-820b-aab3df39b394\") " pod="openstack/nova-metadata-0" Nov 28 11:30:00 crc kubenswrapper[4923]: I1128 11:30:00.196993 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h59p2\" (UniqueName: \"kubernetes.io/projected/069215bd-5276-4e56-820b-aab3df39b394-kube-api-access-h59p2\") pod \"nova-metadata-0\" (UID: \"069215bd-5276-4e56-820b-aab3df39b394\") " pod="openstack/nova-metadata-0" Nov 28 11:30:00 crc kubenswrapper[4923]: I1128 11:30:00.197020 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/069215bd-5276-4e56-820b-aab3df39b394-config-data\") pod \"nova-metadata-0\" (UID: \"069215bd-5276-4e56-820b-aab3df39b394\") " pod="openstack/nova-metadata-0" Nov 28 11:30:00 crc kubenswrapper[4923]: I1128 11:30:00.197094 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/069215bd-5276-4e56-820b-aab3df39b394-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"069215bd-5276-4e56-820b-aab3df39b394\") " pod="openstack/nova-metadata-0" Nov 28 11:30:00 crc kubenswrapper[4923]: I1128 11:30:00.199403 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/069215bd-5276-4e56-820b-aab3df39b394-logs\") pod \"nova-metadata-0\" (UID: \"069215bd-5276-4e56-820b-aab3df39b394\") " pod="openstack/nova-metadata-0" Nov 28 11:30:00 crc kubenswrapper[4923]: I1128 11:30:00.211045 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/069215bd-5276-4e56-820b-aab3df39b394-config-data\") pod \"nova-metadata-0\" (UID: \"069215bd-5276-4e56-820b-aab3df39b394\") " pod="openstack/nova-metadata-0" Nov 28 11:30:00 crc kubenswrapper[4923]: I1128 11:30:00.212526 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/069215bd-5276-4e56-820b-aab3df39b394-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"069215bd-5276-4e56-820b-aab3df39b394\") " 
pod="openstack/nova-metadata-0" Nov 28 11:30:00 crc kubenswrapper[4923]: I1128 11:30:00.223235 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h59p2\" (UniqueName: \"kubernetes.io/projected/069215bd-5276-4e56-820b-aab3df39b394-kube-api-access-h59p2\") pod \"nova-metadata-0\" (UID: \"069215bd-5276-4e56-820b-aab3df39b394\") " pod="openstack/nova-metadata-0" Nov 28 11:30:00 crc kubenswrapper[4923]: I1128 11:30:00.223294 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/069215bd-5276-4e56-820b-aab3df39b394-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"069215bd-5276-4e56-820b-aab3df39b394\") " pod="openstack/nova-metadata-0" Nov 28 11:30:00 crc kubenswrapper[4923]: I1128 11:30:00.291151 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 11:30:00 crc kubenswrapper[4923]: I1128 11:30:00.298876 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1a72b24b-aac3-4955-9cb3-922f444004a8-secret-volume\") pod \"collect-profiles-29405490-pf6pk\" (UID: \"1a72b24b-aac3-4955-9cb3-922f444004a8\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405490-pf6pk" Nov 28 11:30:00 crc kubenswrapper[4923]: I1128 11:30:00.298926 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1a72b24b-aac3-4955-9cb3-922f444004a8-config-volume\") pod \"collect-profiles-29405490-pf6pk\" (UID: \"1a72b24b-aac3-4955-9cb3-922f444004a8\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405490-pf6pk" Nov 28 11:30:00 crc kubenswrapper[4923]: I1128 11:30:00.298972 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m69n4\" (UniqueName: \"kubernetes.io/projected/1a72b24b-aac3-4955-9cb3-922f444004a8-kube-api-access-m69n4\") pod \"collect-profiles-29405490-pf6pk\" (UID: \"1a72b24b-aac3-4955-9cb3-922f444004a8\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405490-pf6pk" Nov 28 11:30:00 crc kubenswrapper[4923]: I1128 11:30:00.401999 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1a72b24b-aac3-4955-9cb3-922f444004a8-secret-volume\") pod \"collect-profiles-29405490-pf6pk\" (UID: \"1a72b24b-aac3-4955-9cb3-922f444004a8\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405490-pf6pk" Nov 28 11:30:00 crc kubenswrapper[4923]: I1128 11:30:00.402280 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1a72b24b-aac3-4955-9cb3-922f444004a8-config-volume\") pod \"collect-profiles-29405490-pf6pk\" (UID: \"1a72b24b-aac3-4955-9cb3-922f444004a8\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405490-pf6pk" Nov 28 11:30:00 crc kubenswrapper[4923]: I1128 11:30:00.402315 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m69n4\" (UniqueName: \"kubernetes.io/projected/1a72b24b-aac3-4955-9cb3-922f444004a8-kube-api-access-m69n4\") pod \"collect-profiles-29405490-pf6pk\" (UID: \"1a72b24b-aac3-4955-9cb3-922f444004a8\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405490-pf6pk" Nov 28 
11:30:00 crc kubenswrapper[4923]: I1128 11:30:00.403383 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1a72b24b-aac3-4955-9cb3-922f444004a8-config-volume\") pod \"collect-profiles-29405490-pf6pk\" (UID: \"1a72b24b-aac3-4955-9cb3-922f444004a8\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405490-pf6pk" Nov 28 11:30:00 crc kubenswrapper[4923]: I1128 11:30:00.406593 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1a72b24b-aac3-4955-9cb3-922f444004a8-secret-volume\") pod \"collect-profiles-29405490-pf6pk\" (UID: \"1a72b24b-aac3-4955-9cb3-922f444004a8\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405490-pf6pk" Nov 28 11:30:00 crc kubenswrapper[4923]: I1128 11:30:00.419842 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m69n4\" (UniqueName: \"kubernetes.io/projected/1a72b24b-aac3-4955-9cb3-922f444004a8-kube-api-access-m69n4\") pod \"collect-profiles-29405490-pf6pk\" (UID: \"1a72b24b-aac3-4955-9cb3-922f444004a8\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405490-pf6pk" Nov 28 11:30:00 crc kubenswrapper[4923]: I1128 11:30:00.458300 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405490-pf6pk" Nov 28 11:30:00 crc kubenswrapper[4923]: I1128 11:30:00.713370 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 11:30:00 crc kubenswrapper[4923]: I1128 11:30:00.889920 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405490-pf6pk"] Nov 28 11:30:00 crc kubenswrapper[4923]: I1128 11:30:00.925090 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"069215bd-5276-4e56-820b-aab3df39b394","Type":"ContainerStarted","Data":"d68a3b09932720540c97cf4e6133136b4c44f643e6f750cd8bce8bec805dff9c"} Nov 28 11:30:00 crc kubenswrapper[4923]: I1128 11:30:00.938898 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f1d9d17f-cc44-4cac-a8d3-b7217e4ace86","Type":"ContainerStarted","Data":"f7e7622f5008a240742beba625688787b9273771d86e79898448ce0cda55b79c"} Nov 28 11:30:00 crc kubenswrapper[4923]: I1128 11:30:00.939092 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f1d9d17f-cc44-4cac-a8d3-b7217e4ace86","Type":"ContainerStarted","Data":"022ed6859aeda20e9133f6bb50137426d3f13e0f4b44d1f331be632fa037b5a0"} Nov 28 11:30:00 crc kubenswrapper[4923]: I1128 11:30:00.943704 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405490-pf6pk" event={"ID":"1a72b24b-aac3-4955-9cb3-922f444004a8","Type":"ContainerStarted","Data":"f31532e86cbe101c63b72c0a9b912b1835c15fadf356f53065330fbdf1a27d95"} Nov 28 11:30:01 crc kubenswrapper[4923]: I1128 11:30:01.226837 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bcbca10e-dedc-4268-a109-a980d385561d" path="/var/lib/kubelet/pods/bcbca10e-dedc-4268-a109-a980d385561d/volumes" Nov 28 11:30:01 crc kubenswrapper[4923]: I1128 11:30:01.958429 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" 
event={"ID":"069215bd-5276-4e56-820b-aab3df39b394","Type":"ContainerStarted","Data":"b4cfb97a40bd744bf218c36d4dec8f634541efebc8624809536e73fc3780bfe6"} Nov 28 11:30:01 crc kubenswrapper[4923]: I1128 11:30:01.958900 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"069215bd-5276-4e56-820b-aab3df39b394","Type":"ContainerStarted","Data":"c9646a25c80e0643f60a4398d222949243e0058a417440257fce9a32ec1edb96"} Nov 28 11:30:01 crc kubenswrapper[4923]: I1128 11:30:01.961428 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f1d9d17f-cc44-4cac-a8d3-b7217e4ace86","Type":"ContainerStarted","Data":"da172f3b4dc7fdbc034daadc9929d699474ce8e227799073a7a52f2de4bc239b"} Nov 28 11:30:01 crc kubenswrapper[4923]: I1128 11:30:01.962428 4923 generic.go:334] "Generic (PLEG): container finished" podID="1a72b24b-aac3-4955-9cb3-922f444004a8" containerID="0825d033b7cfff0baad0a7a9fb3be46cd85a64ab88c8d9a365fc525eb44758be" exitCode=0 Nov 28 11:30:01 crc kubenswrapper[4923]: I1128 11:30:01.962454 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405490-pf6pk" event={"ID":"1a72b24b-aac3-4955-9cb3-922f444004a8","Type":"ContainerDied","Data":"0825d033b7cfff0baad0a7a9fb3be46cd85a64ab88c8d9a365fc525eb44758be"} Nov 28 11:30:01 crc kubenswrapper[4923]: I1128 11:30:01.979475 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.979457051 podStartE2EDuration="2.979457051s" podCreationTimestamp="2025-11-28 11:29:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:30:01.97551282 +0000 UTC m=+1281.104197030" watchObservedRunningTime="2025-11-28 11:30:01.979457051 +0000 UTC m=+1281.108141261" Nov 28 11:30:02 crc kubenswrapper[4923]: I1128 11:30:02.983735 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f1d9d17f-cc44-4cac-a8d3-b7217e4ace86","Type":"ContainerStarted","Data":"88b6575f62304bfed1222f611085fe62823893245ec149eadf2de57718cfef99"} Nov 28 11:30:02 crc kubenswrapper[4923]: I1128 11:30:02.988325 4923 generic.go:334] "Generic (PLEG): container finished" podID="d26d3820-97ce-42bc-92c8-c48082942764" containerID="b371cb093ed219d127de4b09c49ab301c61daae66c5c0159d8c5f84d9db85a7a" exitCode=0 Nov 28 11:30:02 crc kubenswrapper[4923]: I1128 11:30:02.988460 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-8xtzd" event={"ID":"d26d3820-97ce-42bc-92c8-c48082942764","Type":"ContainerDied","Data":"b371cb093ed219d127de4b09c49ab301c61daae66c5c0159d8c5f84d9db85a7a"} Nov 28 11:30:03 crc kubenswrapper[4923]: I1128 11:30:03.179976 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 28 11:30:03 crc kubenswrapper[4923]: I1128 11:30:03.180404 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 28 11:30:03 crc kubenswrapper[4923]: I1128 11:30:03.203780 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 28 11:30:03 crc kubenswrapper[4923]: I1128 11:30:03.203823 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 28 11:30:03 crc kubenswrapper[4923]: I1128 11:30:03.236542 4923 kubelet.go:2542] "SyncLoop (probe)" 
probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 28 11:30:03 crc kubenswrapper[4923]: I1128 11:30:03.337464 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405490-pf6pk" Nov 28 11:30:03 crc kubenswrapper[4923]: I1128 11:30:03.436912 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Nov 28 11:30:03 crc kubenswrapper[4923]: I1128 11:30:03.463673 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1a72b24b-aac3-4955-9cb3-922f444004a8-config-volume\") pod \"1a72b24b-aac3-4955-9cb3-922f444004a8\" (UID: \"1a72b24b-aac3-4955-9cb3-922f444004a8\") " Nov 28 11:30:03 crc kubenswrapper[4923]: I1128 11:30:03.463832 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m69n4\" (UniqueName: \"kubernetes.io/projected/1a72b24b-aac3-4955-9cb3-922f444004a8-kube-api-access-m69n4\") pod \"1a72b24b-aac3-4955-9cb3-922f444004a8\" (UID: \"1a72b24b-aac3-4955-9cb3-922f444004a8\") " Nov 28 11:30:03 crc kubenswrapper[4923]: I1128 11:30:03.463982 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1a72b24b-aac3-4955-9cb3-922f444004a8-secret-volume\") pod \"1a72b24b-aac3-4955-9cb3-922f444004a8\" (UID: \"1a72b24b-aac3-4955-9cb3-922f444004a8\") " Nov 28 11:30:03 crc kubenswrapper[4923]: I1128 11:30:03.464596 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1a72b24b-aac3-4955-9cb3-922f444004a8-config-volume" (OuterVolumeSpecName: "config-volume") pod "1a72b24b-aac3-4955-9cb3-922f444004a8" (UID: "1a72b24b-aac3-4955-9cb3-922f444004a8"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:30:03 crc kubenswrapper[4923]: I1128 11:30:03.472041 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1a72b24b-aac3-4955-9cb3-922f444004a8-kube-api-access-m69n4" (OuterVolumeSpecName: "kube-api-access-m69n4") pod "1a72b24b-aac3-4955-9cb3-922f444004a8" (UID: "1a72b24b-aac3-4955-9cb3-922f444004a8"). InnerVolumeSpecName "kube-api-access-m69n4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:30:03 crc kubenswrapper[4923]: I1128 11:30:03.481125 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1a72b24b-aac3-4955-9cb3-922f444004a8-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "1a72b24b-aac3-4955-9cb3-922f444004a8" (UID: "1a72b24b-aac3-4955-9cb3-922f444004a8"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:30:03 crc kubenswrapper[4923]: I1128 11:30:03.565350 4923 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1a72b24b-aac3-4955-9cb3-922f444004a8-config-volume\") on node \"crc\" DevicePath \"\"" Nov 28 11:30:03 crc kubenswrapper[4923]: I1128 11:30:03.565382 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m69n4\" (UniqueName: \"kubernetes.io/projected/1a72b24b-aac3-4955-9cb3-922f444004a8-kube-api-access-m69n4\") on node \"crc\" DevicePath \"\"" Nov 28 11:30:03 crc kubenswrapper[4923]: I1128 11:30:03.565393 4923 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1a72b24b-aac3-4955-9cb3-922f444004a8-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 28 11:30:03 crc kubenswrapper[4923]: I1128 11:30:03.594163 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-566b5b7845-zr9q6" Nov 28 11:30:03 crc kubenswrapper[4923]: I1128 11:30:03.659040 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6d97fcdd8f-vr5qb"] Nov 28 11:30:03 crc kubenswrapper[4923]: I1128 11:30:03.659326 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6d97fcdd8f-vr5qb" podUID="d1826eb1-8cef-42e9-a892-824c61ad704a" containerName="dnsmasq-dns" containerID="cri-o://81b62b488e165ea53f1fa56cc2b9e7bec2d122cc2c430235b672550bab1954cf" gracePeriod=10 Nov 28 11:30:04 crc kubenswrapper[4923]: I1128 11:30:04.004224 4923 generic.go:334] "Generic (PLEG): container finished" podID="d1826eb1-8cef-42e9-a892-824c61ad704a" containerID="81b62b488e165ea53f1fa56cc2b9e7bec2d122cc2c430235b672550bab1954cf" exitCode=0 Nov 28 11:30:04 crc kubenswrapper[4923]: I1128 11:30:04.004292 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d97fcdd8f-vr5qb" event={"ID":"d1826eb1-8cef-42e9-a892-824c61ad704a","Type":"ContainerDied","Data":"81b62b488e165ea53f1fa56cc2b9e7bec2d122cc2c430235b672550bab1954cf"} Nov 28 11:30:04 crc kubenswrapper[4923]: I1128 11:30:04.011781 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405490-pf6pk" event={"ID":"1a72b24b-aac3-4955-9cb3-922f444004a8","Type":"ContainerDied","Data":"f31532e86cbe101c63b72c0a9b912b1835c15fadf356f53065330fbdf1a27d95"} Nov 28 11:30:04 crc kubenswrapper[4923]: I1128 11:30:04.011841 4923 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f31532e86cbe101c63b72c0a9b912b1835c15fadf356f53065330fbdf1a27d95" Nov 28 11:30:04 crc kubenswrapper[4923]: I1128 11:30:04.012253 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405490-pf6pk" Nov 28 11:30:04 crc kubenswrapper[4923]: I1128 11:30:04.064229 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 28 11:30:04 crc kubenswrapper[4923]: I1128 11:30:04.164125 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Nov 28 11:30:04 crc kubenswrapper[4923]: I1128 11:30:04.185046 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6d97fcdd8f-vr5qb" Nov 28 11:30:04 crc kubenswrapper[4923]: I1128 11:30:04.271632 4923 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="fb5f755a-29cb-453a-b61d-288cea6bba2e" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.166:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 28 11:30:04 crc kubenswrapper[4923]: I1128 11:30:04.271921 4923 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="fb5f755a-29cb-453a-b61d-288cea6bba2e" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.166:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 28 11:30:04 crc kubenswrapper[4923]: I1128 11:30:04.282834 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d1826eb1-8cef-42e9-a892-824c61ad704a-ovsdbserver-sb\") pod \"d1826eb1-8cef-42e9-a892-824c61ad704a\" (UID: \"d1826eb1-8cef-42e9-a892-824c61ad704a\") " Nov 28 11:30:04 crc kubenswrapper[4923]: I1128 11:30:04.282869 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d1826eb1-8cef-42e9-a892-824c61ad704a-dns-svc\") pod \"d1826eb1-8cef-42e9-a892-824c61ad704a\" (UID: \"d1826eb1-8cef-42e9-a892-824c61ad704a\") " Nov 28 11:30:04 crc kubenswrapper[4923]: I1128 11:30:04.283007 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d1826eb1-8cef-42e9-a892-824c61ad704a-ovsdbserver-nb\") pod \"d1826eb1-8cef-42e9-a892-824c61ad704a\" (UID: \"d1826eb1-8cef-42e9-a892-824c61ad704a\") " Nov 28 11:30:04 crc kubenswrapper[4923]: I1128 11:30:04.283052 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7xvwk\" (UniqueName: \"kubernetes.io/projected/d1826eb1-8cef-42e9-a892-824c61ad704a-kube-api-access-7xvwk\") pod \"d1826eb1-8cef-42e9-a892-824c61ad704a\" (UID: \"d1826eb1-8cef-42e9-a892-824c61ad704a\") " Nov 28 11:30:04 crc kubenswrapper[4923]: I1128 11:30:04.283142 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d1826eb1-8cef-42e9-a892-824c61ad704a-config\") pod \"d1826eb1-8cef-42e9-a892-824c61ad704a\" (UID: \"d1826eb1-8cef-42e9-a892-824c61ad704a\") " Nov 28 11:30:04 crc kubenswrapper[4923]: I1128 11:30:04.290315 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d1826eb1-8cef-42e9-a892-824c61ad704a-kube-api-access-7xvwk" (OuterVolumeSpecName: "kube-api-access-7xvwk") pod "d1826eb1-8cef-42e9-a892-824c61ad704a" (UID: "d1826eb1-8cef-42e9-a892-824c61ad704a"). InnerVolumeSpecName "kube-api-access-7xvwk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:30:04 crc kubenswrapper[4923]: I1128 11:30:04.367976 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d1826eb1-8cef-42e9-a892-824c61ad704a-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "d1826eb1-8cef-42e9-a892-824c61ad704a" (UID: "d1826eb1-8cef-42e9-a892-824c61ad704a"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:30:04 crc kubenswrapper[4923]: I1128 11:30:04.411800 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7xvwk\" (UniqueName: \"kubernetes.io/projected/d1826eb1-8cef-42e9-a892-824c61ad704a-kube-api-access-7xvwk\") on node \"crc\" DevicePath \"\"" Nov 28 11:30:04 crc kubenswrapper[4923]: I1128 11:30:04.411832 4923 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d1826eb1-8cef-42e9-a892-824c61ad704a-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 11:30:04 crc kubenswrapper[4923]: I1128 11:30:04.432902 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d1826eb1-8cef-42e9-a892-824c61ad704a-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "d1826eb1-8cef-42e9-a892-824c61ad704a" (UID: "d1826eb1-8cef-42e9-a892-824c61ad704a"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:30:04 crc kubenswrapper[4923]: I1128 11:30:04.461571 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d1826eb1-8cef-42e9-a892-824c61ad704a-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "d1826eb1-8cef-42e9-a892-824c61ad704a" (UID: "d1826eb1-8cef-42e9-a892-824c61ad704a"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:30:04 crc kubenswrapper[4923]: I1128 11:30:04.501301 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d1826eb1-8cef-42e9-a892-824c61ad704a-config" (OuterVolumeSpecName: "config") pod "d1826eb1-8cef-42e9-a892-824c61ad704a" (UID: "d1826eb1-8cef-42e9-a892-824c61ad704a"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:30:04 crc kubenswrapper[4923]: I1128 11:30:04.532898 4923 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d1826eb1-8cef-42e9-a892-824c61ad704a-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 11:30:04 crc kubenswrapper[4923]: I1128 11:30:04.533525 4923 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d1826eb1-8cef-42e9-a892-824c61ad704a-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 11:30:04 crc kubenswrapper[4923]: I1128 11:30:04.533537 4923 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d1826eb1-8cef-42e9-a892-824c61ad704a-config\") on node \"crc\" DevicePath \"\"" Nov 28 11:30:04 crc kubenswrapper[4923]: I1128 11:30:04.612308 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-8xtzd" Nov 28 11:30:04 crc kubenswrapper[4923]: I1128 11:30:04.739606 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d26d3820-97ce-42bc-92c8-c48082942764-scripts\") pod \"d26d3820-97ce-42bc-92c8-c48082942764\" (UID: \"d26d3820-97ce-42bc-92c8-c48082942764\") " Nov 28 11:30:04 crc kubenswrapper[4923]: I1128 11:30:04.739804 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d26d3820-97ce-42bc-92c8-c48082942764-config-data\") pod \"d26d3820-97ce-42bc-92c8-c48082942764\" (UID: \"d26d3820-97ce-42bc-92c8-c48082942764\") " Nov 28 11:30:04 crc kubenswrapper[4923]: I1128 11:30:04.739837 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d26d3820-97ce-42bc-92c8-c48082942764-combined-ca-bundle\") pod \"d26d3820-97ce-42bc-92c8-c48082942764\" (UID: \"d26d3820-97ce-42bc-92c8-c48082942764\") " Nov 28 11:30:04 crc kubenswrapper[4923]: I1128 11:30:04.739883 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f9429\" (UniqueName: \"kubernetes.io/projected/d26d3820-97ce-42bc-92c8-c48082942764-kube-api-access-f9429\") pod \"d26d3820-97ce-42bc-92c8-c48082942764\" (UID: \"d26d3820-97ce-42bc-92c8-c48082942764\") " Nov 28 11:30:04 crc kubenswrapper[4923]: I1128 11:30:04.745840 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d26d3820-97ce-42bc-92c8-c48082942764-scripts" (OuterVolumeSpecName: "scripts") pod "d26d3820-97ce-42bc-92c8-c48082942764" (UID: "d26d3820-97ce-42bc-92c8-c48082942764"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:30:04 crc kubenswrapper[4923]: I1128 11:30:04.749104 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d26d3820-97ce-42bc-92c8-c48082942764-kube-api-access-f9429" (OuterVolumeSpecName: "kube-api-access-f9429") pod "d26d3820-97ce-42bc-92c8-c48082942764" (UID: "d26d3820-97ce-42bc-92c8-c48082942764"). InnerVolumeSpecName "kube-api-access-f9429". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:30:04 crc kubenswrapper[4923]: I1128 11:30:04.779263 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d26d3820-97ce-42bc-92c8-c48082942764-config-data" (OuterVolumeSpecName: "config-data") pod "d26d3820-97ce-42bc-92c8-c48082942764" (UID: "d26d3820-97ce-42bc-92c8-c48082942764"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:30:04 crc kubenswrapper[4923]: I1128 11:30:04.797872 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d26d3820-97ce-42bc-92c8-c48082942764-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d26d3820-97ce-42bc-92c8-c48082942764" (UID: "d26d3820-97ce-42bc-92c8-c48082942764"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:30:04 crc kubenswrapper[4923]: I1128 11:30:04.841365 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f9429\" (UniqueName: \"kubernetes.io/projected/d26d3820-97ce-42bc-92c8-c48082942764-kube-api-access-f9429\") on node \"crc\" DevicePath \"\"" Nov 28 11:30:04 crc kubenswrapper[4923]: I1128 11:30:04.841392 4923 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d26d3820-97ce-42bc-92c8-c48082942764-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 11:30:04 crc kubenswrapper[4923]: I1128 11:30:04.841402 4923 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d26d3820-97ce-42bc-92c8-c48082942764-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 11:30:04 crc kubenswrapper[4923]: I1128 11:30:04.841411 4923 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d26d3820-97ce-42bc-92c8-c48082942764-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 11:30:05 crc kubenswrapper[4923]: I1128 11:30:05.044359 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f1d9d17f-cc44-4cac-a8d3-b7217e4ace86","Type":"ContainerStarted","Data":"a3447021828fb8202e6be520e5cb4b0b7bfc128ab753f18cc40c949570604056"} Nov 28 11:30:05 crc kubenswrapper[4923]: I1128 11:30:05.045413 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 28 11:30:05 crc kubenswrapper[4923]: I1128 11:30:05.061517 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d97fcdd8f-vr5qb" event={"ID":"d1826eb1-8cef-42e9-a892-824c61ad704a","Type":"ContainerDied","Data":"f8b71dc55eab5bd398e889652a356b311164431fe5eba6eff4a3e4d89457143f"} Nov 28 11:30:05 crc kubenswrapper[4923]: I1128 11:30:05.061573 4923 scope.go:117] "RemoveContainer" containerID="81b62b488e165ea53f1fa56cc2b9e7bec2d122cc2c430235b672550bab1954cf" Nov 28 11:30:05 crc kubenswrapper[4923]: I1128 11:30:05.061539 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6d97fcdd8f-vr5qb" Nov 28 11:30:05 crc kubenswrapper[4923]: I1128 11:30:05.066576 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.405170809 podStartE2EDuration="7.066559974s" podCreationTimestamp="2025-11-28 11:29:58 +0000 UTC" firstStartedPulling="2025-11-28 11:30:00.047769863 +0000 UTC m=+1279.176454073" lastFinishedPulling="2025-11-28 11:30:04.709159028 +0000 UTC m=+1283.837843238" observedRunningTime="2025-11-28 11:30:05.062768617 +0000 UTC m=+1284.191452827" watchObservedRunningTime="2025-11-28 11:30:05.066559974 +0000 UTC m=+1284.195244184" Nov 28 11:30:05 crc kubenswrapper[4923]: I1128 11:30:05.078633 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-8xtzd" event={"ID":"d26d3820-97ce-42bc-92c8-c48082942764","Type":"ContainerDied","Data":"13d868896b7e178a98d557b5f55ec5948e29255fe112a09d019415a589b9f790"} Nov 28 11:30:05 crc kubenswrapper[4923]: I1128 11:30:05.078664 4923 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="13d868896b7e178a98d557b5f55ec5948e29255fe112a09d019415a589b9f790" Nov 28 11:30:05 crc kubenswrapper[4923]: I1128 11:30:05.078667 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-8xtzd" Nov 28 11:30:05 crc kubenswrapper[4923]: I1128 11:30:05.086077 4923 generic.go:334] "Generic (PLEG): container finished" podID="3ad75bb0-7f36-4ac3-b0a7-402237601802" containerID="7e9207943b07a406fe8f65daf852a00fa82fa2e38b70dafe5af1e90dda7afd40" exitCode=0 Nov 28 11:30:05 crc kubenswrapper[4923]: I1128 11:30:05.086731 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-rmq5g" event={"ID":"3ad75bb0-7f36-4ac3-b0a7-402237601802","Type":"ContainerDied","Data":"7e9207943b07a406fe8f65daf852a00fa82fa2e38b70dafe5af1e90dda7afd40"} Nov 28 11:30:05 crc kubenswrapper[4923]: I1128 11:30:05.101049 4923 scope.go:117] "RemoveContainer" containerID="22393a4db8df8b155ef6d78ae071068e8b4bfb9b1ae8dc0f5e137eae1701648a" Nov 28 11:30:05 crc kubenswrapper[4923]: I1128 11:30:05.117706 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6d97fcdd8f-vr5qb"] Nov 28 11:30:05 crc kubenswrapper[4923]: I1128 11:30:05.134080 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6d97fcdd8f-vr5qb"] Nov 28 11:30:05 crc kubenswrapper[4923]: I1128 11:30:05.150966 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 28 11:30:05 crc kubenswrapper[4923]: I1128 11:30:05.151169 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="fb5f755a-29cb-453a-b61d-288cea6bba2e" containerName="nova-api-log" containerID="cri-o://923ca8eae6f7293f8ce238aacd5c1f3c26e1fd039b6d5b9a9b8f9803d297e562" gracePeriod=30 Nov 28 11:30:05 crc kubenswrapper[4923]: I1128 11:30:05.151508 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="fb5f755a-29cb-453a-b61d-288cea6bba2e" containerName="nova-api-api" containerID="cri-o://778106caa588d36abba6cfc29648e8eb84aafbc46b21d47472b47df3a30a1a57" gracePeriod=30 Nov 28 11:30:05 crc kubenswrapper[4923]: I1128 11:30:05.183004 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d1826eb1-8cef-42e9-a892-824c61ad704a" path="/var/lib/kubelet/pods/d1826eb1-8cef-42e9-a892-824c61ad704a/volumes" Nov 28 11:30:05 crc kubenswrapper[4923]: I1128 11:30:05.183946 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 11:30:05 crc kubenswrapper[4923]: I1128 11:30:05.184141 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="069215bd-5276-4e56-820b-aab3df39b394" containerName="nova-metadata-log" containerID="cri-o://c9646a25c80e0643f60a4398d222949243e0058a417440257fce9a32ec1edb96" gracePeriod=30 Nov 28 11:30:05 crc kubenswrapper[4923]: I1128 11:30:05.184264 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="069215bd-5276-4e56-820b-aab3df39b394" containerName="nova-metadata-metadata" containerID="cri-o://b4cfb97a40bd744bf218c36d4dec8f634541efebc8624809536e73fc3780bfe6" gracePeriod=30 Nov 28 11:30:05 crc kubenswrapper[4923]: I1128 11:30:05.291350 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 28 11:30:05 crc kubenswrapper[4923]: I1128 11:30:05.291399 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 28 11:30:05 crc kubenswrapper[4923]: I1128 11:30:05.442719 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/nova-scheduler-0"] Nov 28 11:30:05 crc kubenswrapper[4923]: I1128 11:30:05.818167 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 11:30:05 crc kubenswrapper[4923]: I1128 11:30:05.860523 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/069215bd-5276-4e56-820b-aab3df39b394-config-data\") pod \"069215bd-5276-4e56-820b-aab3df39b394\" (UID: \"069215bd-5276-4e56-820b-aab3df39b394\") " Nov 28 11:30:05 crc kubenswrapper[4923]: I1128 11:30:05.861403 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/069215bd-5276-4e56-820b-aab3df39b394-nova-metadata-tls-certs\") pod \"069215bd-5276-4e56-820b-aab3df39b394\" (UID: \"069215bd-5276-4e56-820b-aab3df39b394\") " Nov 28 11:30:05 crc kubenswrapper[4923]: I1128 11:30:05.861738 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h59p2\" (UniqueName: \"kubernetes.io/projected/069215bd-5276-4e56-820b-aab3df39b394-kube-api-access-h59p2\") pod \"069215bd-5276-4e56-820b-aab3df39b394\" (UID: \"069215bd-5276-4e56-820b-aab3df39b394\") " Nov 28 11:30:05 crc kubenswrapper[4923]: I1128 11:30:05.861825 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/069215bd-5276-4e56-820b-aab3df39b394-logs\") pod \"069215bd-5276-4e56-820b-aab3df39b394\" (UID: \"069215bd-5276-4e56-820b-aab3df39b394\") " Nov 28 11:30:05 crc kubenswrapper[4923]: I1128 11:30:05.861863 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/069215bd-5276-4e56-820b-aab3df39b394-combined-ca-bundle\") pod \"069215bd-5276-4e56-820b-aab3df39b394\" (UID: \"069215bd-5276-4e56-820b-aab3df39b394\") " Nov 28 11:30:05 crc kubenswrapper[4923]: I1128 11:30:05.863916 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/069215bd-5276-4e56-820b-aab3df39b394-logs" (OuterVolumeSpecName: "logs") pod "069215bd-5276-4e56-820b-aab3df39b394" (UID: "069215bd-5276-4e56-820b-aab3df39b394"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:30:05 crc kubenswrapper[4923]: I1128 11:30:05.875440 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/069215bd-5276-4e56-820b-aab3df39b394-kube-api-access-h59p2" (OuterVolumeSpecName: "kube-api-access-h59p2") pod "069215bd-5276-4e56-820b-aab3df39b394" (UID: "069215bd-5276-4e56-820b-aab3df39b394"). InnerVolumeSpecName "kube-api-access-h59p2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:30:05 crc kubenswrapper[4923]: I1128 11:30:05.907355 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/069215bd-5276-4e56-820b-aab3df39b394-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "069215bd-5276-4e56-820b-aab3df39b394" (UID: "069215bd-5276-4e56-820b-aab3df39b394"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:30:05 crc kubenswrapper[4923]: I1128 11:30:05.907749 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/069215bd-5276-4e56-820b-aab3df39b394-config-data" (OuterVolumeSpecName: "config-data") pod "069215bd-5276-4e56-820b-aab3df39b394" (UID: "069215bd-5276-4e56-820b-aab3df39b394"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:30:05 crc kubenswrapper[4923]: I1128 11:30:05.928026 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/069215bd-5276-4e56-820b-aab3df39b394-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "069215bd-5276-4e56-820b-aab3df39b394" (UID: "069215bd-5276-4e56-820b-aab3df39b394"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:30:05 crc kubenswrapper[4923]: I1128 11:30:05.964013 4923 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/069215bd-5276-4e56-820b-aab3df39b394-logs\") on node \"crc\" DevicePath \"\"" Nov 28 11:30:05 crc kubenswrapper[4923]: I1128 11:30:05.964043 4923 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/069215bd-5276-4e56-820b-aab3df39b394-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 11:30:05 crc kubenswrapper[4923]: I1128 11:30:05.964065 4923 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/069215bd-5276-4e56-820b-aab3df39b394-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 11:30:05 crc kubenswrapper[4923]: I1128 11:30:05.964074 4923 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/069215bd-5276-4e56-820b-aab3df39b394-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 11:30:05 crc kubenswrapper[4923]: I1128 11:30:05.964083 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h59p2\" (UniqueName: \"kubernetes.io/projected/069215bd-5276-4e56-820b-aab3df39b394-kube-api-access-h59p2\") on node \"crc\" DevicePath \"\"" Nov 28 11:30:06 crc kubenswrapper[4923]: I1128 11:30:06.130643 4923 generic.go:334] "Generic (PLEG): container finished" podID="fb5f755a-29cb-453a-b61d-288cea6bba2e" containerID="923ca8eae6f7293f8ce238aacd5c1f3c26e1fd039b6d5b9a9b8f9803d297e562" exitCode=143 Nov 28 11:30:06 crc kubenswrapper[4923]: I1128 11:30:06.130727 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"fb5f755a-29cb-453a-b61d-288cea6bba2e","Type":"ContainerDied","Data":"923ca8eae6f7293f8ce238aacd5c1f3c26e1fd039b6d5b9a9b8f9803d297e562"} Nov 28 11:30:06 crc kubenswrapper[4923]: I1128 11:30:06.133704 4923 generic.go:334] "Generic (PLEG): container finished" podID="069215bd-5276-4e56-820b-aab3df39b394" containerID="b4cfb97a40bd744bf218c36d4dec8f634541efebc8624809536e73fc3780bfe6" exitCode=0 Nov 28 11:30:06 crc kubenswrapper[4923]: I1128 11:30:06.133742 4923 generic.go:334] "Generic (PLEG): container finished" podID="069215bd-5276-4e56-820b-aab3df39b394" containerID="c9646a25c80e0643f60a4398d222949243e0058a417440257fce9a32ec1edb96" exitCode=143 Nov 28 11:30:06 crc kubenswrapper[4923]: I1128 11:30:06.133782 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" 
event={"ID":"069215bd-5276-4e56-820b-aab3df39b394","Type":"ContainerDied","Data":"b4cfb97a40bd744bf218c36d4dec8f634541efebc8624809536e73fc3780bfe6"} Nov 28 11:30:06 crc kubenswrapper[4923]: I1128 11:30:06.133801 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 11:30:06 crc kubenswrapper[4923]: I1128 11:30:06.133846 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"069215bd-5276-4e56-820b-aab3df39b394","Type":"ContainerDied","Data":"c9646a25c80e0643f60a4398d222949243e0058a417440257fce9a32ec1edb96"} Nov 28 11:30:06 crc kubenswrapper[4923]: I1128 11:30:06.133862 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"069215bd-5276-4e56-820b-aab3df39b394","Type":"ContainerDied","Data":"d68a3b09932720540c97cf4e6133136b4c44f643e6f750cd8bce8bec805dff9c"} Nov 28 11:30:06 crc kubenswrapper[4923]: I1128 11:30:06.133887 4923 scope.go:117] "RemoveContainer" containerID="b4cfb97a40bd744bf218c36d4dec8f634541efebc8624809536e73fc3780bfe6" Nov 28 11:30:06 crc kubenswrapper[4923]: I1128 11:30:06.137984 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="d5b84f3f-7303-45e3-8785-391209a4b7d7" containerName="nova-scheduler-scheduler" containerID="cri-o://e7519e7865c0c1b3488e1fb36233b18ea9d2b2c3b11dd33cb4721b2cd319ade6" gracePeriod=30 Nov 28 11:30:06 crc kubenswrapper[4923]: I1128 11:30:06.179384 4923 scope.go:117] "RemoveContainer" containerID="c9646a25c80e0643f60a4398d222949243e0058a417440257fce9a32ec1edb96" Nov 28 11:30:06 crc kubenswrapper[4923]: I1128 11:30:06.201131 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 11:30:06 crc kubenswrapper[4923]: I1128 11:30:06.207665 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 11:30:06 crc kubenswrapper[4923]: I1128 11:30:06.216522 4923 scope.go:117] "RemoveContainer" containerID="b4cfb97a40bd744bf218c36d4dec8f634541efebc8624809536e73fc3780bfe6" Nov 28 11:30:06 crc kubenswrapper[4923]: E1128 11:30:06.216915 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b4cfb97a40bd744bf218c36d4dec8f634541efebc8624809536e73fc3780bfe6\": container with ID starting with b4cfb97a40bd744bf218c36d4dec8f634541efebc8624809536e73fc3780bfe6 not found: ID does not exist" containerID="b4cfb97a40bd744bf218c36d4dec8f634541efebc8624809536e73fc3780bfe6" Nov 28 11:30:06 crc kubenswrapper[4923]: I1128 11:30:06.216959 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b4cfb97a40bd744bf218c36d4dec8f634541efebc8624809536e73fc3780bfe6"} err="failed to get container status \"b4cfb97a40bd744bf218c36d4dec8f634541efebc8624809536e73fc3780bfe6\": rpc error: code = NotFound desc = could not find container \"b4cfb97a40bd744bf218c36d4dec8f634541efebc8624809536e73fc3780bfe6\": container with ID starting with b4cfb97a40bd744bf218c36d4dec8f634541efebc8624809536e73fc3780bfe6 not found: ID does not exist" Nov 28 11:30:06 crc kubenswrapper[4923]: I1128 11:30:06.216979 4923 scope.go:117] "RemoveContainer" containerID="c9646a25c80e0643f60a4398d222949243e0058a417440257fce9a32ec1edb96" Nov 28 11:30:06 crc kubenswrapper[4923]: E1128 11:30:06.217169 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"c9646a25c80e0643f60a4398d222949243e0058a417440257fce9a32ec1edb96\": container with ID starting with c9646a25c80e0643f60a4398d222949243e0058a417440257fce9a32ec1edb96 not found: ID does not exist" containerID="c9646a25c80e0643f60a4398d222949243e0058a417440257fce9a32ec1edb96" Nov 28 11:30:06 crc kubenswrapper[4923]: I1128 11:30:06.217188 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c9646a25c80e0643f60a4398d222949243e0058a417440257fce9a32ec1edb96"} err="failed to get container status \"c9646a25c80e0643f60a4398d222949243e0058a417440257fce9a32ec1edb96\": rpc error: code = NotFound desc = could not find container \"c9646a25c80e0643f60a4398d222949243e0058a417440257fce9a32ec1edb96\": container with ID starting with c9646a25c80e0643f60a4398d222949243e0058a417440257fce9a32ec1edb96 not found: ID does not exist" Nov 28 11:30:06 crc kubenswrapper[4923]: I1128 11:30:06.217211 4923 scope.go:117] "RemoveContainer" containerID="b4cfb97a40bd744bf218c36d4dec8f634541efebc8624809536e73fc3780bfe6" Nov 28 11:30:06 crc kubenswrapper[4923]: I1128 11:30:06.217390 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b4cfb97a40bd744bf218c36d4dec8f634541efebc8624809536e73fc3780bfe6"} err="failed to get container status \"b4cfb97a40bd744bf218c36d4dec8f634541efebc8624809536e73fc3780bfe6\": rpc error: code = NotFound desc = could not find container \"b4cfb97a40bd744bf218c36d4dec8f634541efebc8624809536e73fc3780bfe6\": container with ID starting with b4cfb97a40bd744bf218c36d4dec8f634541efebc8624809536e73fc3780bfe6 not found: ID does not exist" Nov 28 11:30:06 crc kubenswrapper[4923]: I1128 11:30:06.217409 4923 scope.go:117] "RemoveContainer" containerID="c9646a25c80e0643f60a4398d222949243e0058a417440257fce9a32ec1edb96" Nov 28 11:30:06 crc kubenswrapper[4923]: I1128 11:30:06.217592 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c9646a25c80e0643f60a4398d222949243e0058a417440257fce9a32ec1edb96"} err="failed to get container status \"c9646a25c80e0643f60a4398d222949243e0058a417440257fce9a32ec1edb96\": rpc error: code = NotFound desc = could not find container \"c9646a25c80e0643f60a4398d222949243e0058a417440257fce9a32ec1edb96\": container with ID starting with c9646a25c80e0643f60a4398d222949243e0058a417440257fce9a32ec1edb96 not found: ID does not exist" Nov 28 11:30:06 crc kubenswrapper[4923]: I1128 11:30:06.227977 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 28 11:30:06 crc kubenswrapper[4923]: E1128 11:30:06.228391 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="069215bd-5276-4e56-820b-aab3df39b394" containerName="nova-metadata-log" Nov 28 11:30:06 crc kubenswrapper[4923]: I1128 11:30:06.228407 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="069215bd-5276-4e56-820b-aab3df39b394" containerName="nova-metadata-log" Nov 28 11:30:06 crc kubenswrapper[4923]: E1128 11:30:06.228420 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="069215bd-5276-4e56-820b-aab3df39b394" containerName="nova-metadata-metadata" Nov 28 11:30:06 crc kubenswrapper[4923]: I1128 11:30:06.228427 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="069215bd-5276-4e56-820b-aab3df39b394" containerName="nova-metadata-metadata" Nov 28 11:30:06 crc kubenswrapper[4923]: E1128 11:30:06.228434 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d26d3820-97ce-42bc-92c8-c48082942764" 
containerName="nova-manage" Nov 28 11:30:06 crc kubenswrapper[4923]: I1128 11:30:06.228440 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="d26d3820-97ce-42bc-92c8-c48082942764" containerName="nova-manage" Nov 28 11:30:06 crc kubenswrapper[4923]: E1128 11:30:06.228456 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d1826eb1-8cef-42e9-a892-824c61ad704a" containerName="dnsmasq-dns" Nov 28 11:30:06 crc kubenswrapper[4923]: I1128 11:30:06.228462 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="d1826eb1-8cef-42e9-a892-824c61ad704a" containerName="dnsmasq-dns" Nov 28 11:30:06 crc kubenswrapper[4923]: E1128 11:30:06.228472 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1a72b24b-aac3-4955-9cb3-922f444004a8" containerName="collect-profiles" Nov 28 11:30:06 crc kubenswrapper[4923]: I1128 11:30:06.228478 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="1a72b24b-aac3-4955-9cb3-922f444004a8" containerName="collect-profiles" Nov 28 11:30:06 crc kubenswrapper[4923]: E1128 11:30:06.228489 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d1826eb1-8cef-42e9-a892-824c61ad704a" containerName="init" Nov 28 11:30:06 crc kubenswrapper[4923]: I1128 11:30:06.228495 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="d1826eb1-8cef-42e9-a892-824c61ad704a" containerName="init" Nov 28 11:30:06 crc kubenswrapper[4923]: I1128 11:30:06.228649 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="d26d3820-97ce-42bc-92c8-c48082942764" containerName="nova-manage" Nov 28 11:30:06 crc kubenswrapper[4923]: I1128 11:30:06.228668 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="069215bd-5276-4e56-820b-aab3df39b394" containerName="nova-metadata-log" Nov 28 11:30:06 crc kubenswrapper[4923]: I1128 11:30:06.228677 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="1a72b24b-aac3-4955-9cb3-922f444004a8" containerName="collect-profiles" Nov 28 11:30:06 crc kubenswrapper[4923]: I1128 11:30:06.228685 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="069215bd-5276-4e56-820b-aab3df39b394" containerName="nova-metadata-metadata" Nov 28 11:30:06 crc kubenswrapper[4923]: I1128 11:30:06.228695 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="d1826eb1-8cef-42e9-a892-824c61ad704a" containerName="dnsmasq-dns" Nov 28 11:30:06 crc kubenswrapper[4923]: I1128 11:30:06.229659 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 11:30:06 crc kubenswrapper[4923]: I1128 11:30:06.231892 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 28 11:30:06 crc kubenswrapper[4923]: I1128 11:30:06.232115 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 28 11:30:06 crc kubenswrapper[4923]: I1128 11:30:06.246136 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 11:30:06 crc kubenswrapper[4923]: I1128 11:30:06.272663 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/57d1e985-8768-421e-910e-a65c632dd5d3-logs\") pod \"nova-metadata-0\" (UID: \"57d1e985-8768-421e-910e-a65c632dd5d3\") " pod="openstack/nova-metadata-0" Nov 28 11:30:06 crc kubenswrapper[4923]: I1128 11:30:06.278241 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/57d1e985-8768-421e-910e-a65c632dd5d3-config-data\") pod \"nova-metadata-0\" (UID: \"57d1e985-8768-421e-910e-a65c632dd5d3\") " pod="openstack/nova-metadata-0" Nov 28 11:30:06 crc kubenswrapper[4923]: I1128 11:30:06.278367 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57d1e985-8768-421e-910e-a65c632dd5d3-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"57d1e985-8768-421e-910e-a65c632dd5d3\") " pod="openstack/nova-metadata-0" Nov 28 11:30:06 crc kubenswrapper[4923]: I1128 11:30:06.278421 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/57d1e985-8768-421e-910e-a65c632dd5d3-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"57d1e985-8768-421e-910e-a65c632dd5d3\") " pod="openstack/nova-metadata-0" Nov 28 11:30:06 crc kubenswrapper[4923]: I1128 11:30:06.278532 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xwpdp\" (UniqueName: \"kubernetes.io/projected/57d1e985-8768-421e-910e-a65c632dd5d3-kube-api-access-xwpdp\") pod \"nova-metadata-0\" (UID: \"57d1e985-8768-421e-910e-a65c632dd5d3\") " pod="openstack/nova-metadata-0" Nov 28 11:30:06 crc kubenswrapper[4923]: I1128 11:30:06.380068 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xwpdp\" (UniqueName: \"kubernetes.io/projected/57d1e985-8768-421e-910e-a65c632dd5d3-kube-api-access-xwpdp\") pod \"nova-metadata-0\" (UID: \"57d1e985-8768-421e-910e-a65c632dd5d3\") " pod="openstack/nova-metadata-0" Nov 28 11:30:06 crc kubenswrapper[4923]: I1128 11:30:06.380152 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/57d1e985-8768-421e-910e-a65c632dd5d3-logs\") pod \"nova-metadata-0\" (UID: \"57d1e985-8768-421e-910e-a65c632dd5d3\") " pod="openstack/nova-metadata-0" Nov 28 11:30:06 crc kubenswrapper[4923]: I1128 11:30:06.380181 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/57d1e985-8768-421e-910e-a65c632dd5d3-config-data\") pod \"nova-metadata-0\" (UID: \"57d1e985-8768-421e-910e-a65c632dd5d3\") " pod="openstack/nova-metadata-0" Nov 28 
11:30:06 crc kubenswrapper[4923]: I1128 11:30:06.380227 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57d1e985-8768-421e-910e-a65c632dd5d3-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"57d1e985-8768-421e-910e-a65c632dd5d3\") " pod="openstack/nova-metadata-0" Nov 28 11:30:06 crc kubenswrapper[4923]: I1128 11:30:06.380251 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/57d1e985-8768-421e-910e-a65c632dd5d3-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"57d1e985-8768-421e-910e-a65c632dd5d3\") " pod="openstack/nova-metadata-0" Nov 28 11:30:06 crc kubenswrapper[4923]: I1128 11:30:06.381319 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/57d1e985-8768-421e-910e-a65c632dd5d3-logs\") pod \"nova-metadata-0\" (UID: \"57d1e985-8768-421e-910e-a65c632dd5d3\") " pod="openstack/nova-metadata-0" Nov 28 11:30:06 crc kubenswrapper[4923]: I1128 11:30:06.398435 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xwpdp\" (UniqueName: \"kubernetes.io/projected/57d1e985-8768-421e-910e-a65c632dd5d3-kube-api-access-xwpdp\") pod \"nova-metadata-0\" (UID: \"57d1e985-8768-421e-910e-a65c632dd5d3\") " pod="openstack/nova-metadata-0" Nov 28 11:30:06 crc kubenswrapper[4923]: I1128 11:30:06.399037 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/57d1e985-8768-421e-910e-a65c632dd5d3-config-data\") pod \"nova-metadata-0\" (UID: \"57d1e985-8768-421e-910e-a65c632dd5d3\") " pod="openstack/nova-metadata-0" Nov 28 11:30:06 crc kubenswrapper[4923]: I1128 11:30:06.399448 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/57d1e985-8768-421e-910e-a65c632dd5d3-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"57d1e985-8768-421e-910e-a65c632dd5d3\") " pod="openstack/nova-metadata-0" Nov 28 11:30:06 crc kubenswrapper[4923]: I1128 11:30:06.403667 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57d1e985-8768-421e-910e-a65c632dd5d3-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"57d1e985-8768-421e-910e-a65c632dd5d3\") " pod="openstack/nova-metadata-0" Nov 28 11:30:06 crc kubenswrapper[4923]: I1128 11:30:06.536759 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-rmq5g" Nov 28 11:30:06 crc kubenswrapper[4923]: I1128 11:30:06.560139 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 11:30:06 crc kubenswrapper[4923]: I1128 11:30:06.585397 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3ad75bb0-7f36-4ac3-b0a7-402237601802-config-data\") pod \"3ad75bb0-7f36-4ac3-b0a7-402237601802\" (UID: \"3ad75bb0-7f36-4ac3-b0a7-402237601802\") " Nov 28 11:30:06 crc kubenswrapper[4923]: I1128 11:30:06.585819 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3ad75bb0-7f36-4ac3-b0a7-402237601802-scripts\") pod \"3ad75bb0-7f36-4ac3-b0a7-402237601802\" (UID: \"3ad75bb0-7f36-4ac3-b0a7-402237601802\") " Nov 28 11:30:06 crc kubenswrapper[4923]: I1128 11:30:06.585842 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3ad75bb0-7f36-4ac3-b0a7-402237601802-combined-ca-bundle\") pod \"3ad75bb0-7f36-4ac3-b0a7-402237601802\" (UID: \"3ad75bb0-7f36-4ac3-b0a7-402237601802\") " Nov 28 11:30:06 crc kubenswrapper[4923]: I1128 11:30:06.585887 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7hp5h\" (UniqueName: \"kubernetes.io/projected/3ad75bb0-7f36-4ac3-b0a7-402237601802-kube-api-access-7hp5h\") pod \"3ad75bb0-7f36-4ac3-b0a7-402237601802\" (UID: \"3ad75bb0-7f36-4ac3-b0a7-402237601802\") " Nov 28 11:30:06 crc kubenswrapper[4923]: I1128 11:30:06.595739 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ad75bb0-7f36-4ac3-b0a7-402237601802-scripts" (OuterVolumeSpecName: "scripts") pod "3ad75bb0-7f36-4ac3-b0a7-402237601802" (UID: "3ad75bb0-7f36-4ac3-b0a7-402237601802"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:30:06 crc kubenswrapper[4923]: I1128 11:30:06.597995 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ad75bb0-7f36-4ac3-b0a7-402237601802-kube-api-access-7hp5h" (OuterVolumeSpecName: "kube-api-access-7hp5h") pod "3ad75bb0-7f36-4ac3-b0a7-402237601802" (UID: "3ad75bb0-7f36-4ac3-b0a7-402237601802"). InnerVolumeSpecName "kube-api-access-7hp5h". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:30:06 crc kubenswrapper[4923]: I1128 11:30:06.631968 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ad75bb0-7f36-4ac3-b0a7-402237601802-config-data" (OuterVolumeSpecName: "config-data") pod "3ad75bb0-7f36-4ac3-b0a7-402237601802" (UID: "3ad75bb0-7f36-4ac3-b0a7-402237601802"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:30:06 crc kubenswrapper[4923]: I1128 11:30:06.656103 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ad75bb0-7f36-4ac3-b0a7-402237601802-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3ad75bb0-7f36-4ac3-b0a7-402237601802" (UID: "3ad75bb0-7f36-4ac3-b0a7-402237601802"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:30:06 crc kubenswrapper[4923]: I1128 11:30:06.688042 4923 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3ad75bb0-7f36-4ac3-b0a7-402237601802-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 11:30:06 crc kubenswrapper[4923]: I1128 11:30:06.688068 4923 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3ad75bb0-7f36-4ac3-b0a7-402237601802-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 11:30:06 crc kubenswrapper[4923]: I1128 11:30:06.688079 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7hp5h\" (UniqueName: \"kubernetes.io/projected/3ad75bb0-7f36-4ac3-b0a7-402237601802-kube-api-access-7hp5h\") on node \"crc\" DevicePath \"\"" Nov 28 11:30:06 crc kubenswrapper[4923]: I1128 11:30:06.688090 4923 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3ad75bb0-7f36-4ac3-b0a7-402237601802-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 11:30:07 crc kubenswrapper[4923]: I1128 11:30:07.050014 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 11:30:07 crc kubenswrapper[4923]: W1128 11:30:07.075429 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod57d1e985_8768_421e_910e_a65c632dd5d3.slice/crio-eafeebf16f656652224a40f97fa6cf55240cf4223528e443d2f6279201f3d1ca WatchSource:0}: Error finding container eafeebf16f656652224a40f97fa6cf55240cf4223528e443d2f6279201f3d1ca: Status 404 returned error can't find the container with id eafeebf16f656652224a40f97fa6cf55240cf4223528e443d2f6279201f3d1ca Nov 28 11:30:07 crc kubenswrapper[4923]: I1128 11:30:07.206255 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-rmq5g" Nov 28 11:30:07 crc kubenswrapper[4923]: I1128 11:30:07.207234 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="069215bd-5276-4e56-820b-aab3df39b394" path="/var/lib/kubelet/pods/069215bd-5276-4e56-820b-aab3df39b394/volumes" Nov 28 11:30:07 crc kubenswrapper[4923]: I1128 11:30:07.207908 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-rmq5g" event={"ID":"3ad75bb0-7f36-4ac3-b0a7-402237601802","Type":"ContainerDied","Data":"5701da96fa9d8dca59c533d8d0171fed2cd85fea4ec9991df03baacaaa1a998f"} Nov 28 11:30:07 crc kubenswrapper[4923]: I1128 11:30:07.207949 4923 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5701da96fa9d8dca59c533d8d0171fed2cd85fea4ec9991df03baacaaa1a998f" Nov 28 11:30:07 crc kubenswrapper[4923]: I1128 11:30:07.234943 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"57d1e985-8768-421e-910e-a65c632dd5d3","Type":"ContainerStarted","Data":"eafeebf16f656652224a40f97fa6cf55240cf4223528e443d2f6279201f3d1ca"} Nov 28 11:30:07 crc kubenswrapper[4923]: I1128 11:30:07.239828 4923 generic.go:334] "Generic (PLEG): container finished" podID="d5b84f3f-7303-45e3-8785-391209a4b7d7" containerID="e7519e7865c0c1b3488e1fb36233b18ea9d2b2c3b11dd33cb4721b2cd319ade6" exitCode=0 Nov 28 11:30:07 crc kubenswrapper[4923]: I1128 11:30:07.240811 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"d5b84f3f-7303-45e3-8785-391209a4b7d7","Type":"ContainerDied","Data":"e7519e7865c0c1b3488e1fb36233b18ea9d2b2c3b11dd33cb4721b2cd319ade6"} Nov 28 11:30:07 crc kubenswrapper[4923]: I1128 11:30:07.246278 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 28 11:30:07 crc kubenswrapper[4923]: E1128 11:30:07.246852 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3ad75bb0-7f36-4ac3-b0a7-402237601802" containerName="nova-cell1-conductor-db-sync" Nov 28 11:30:07 crc kubenswrapper[4923]: I1128 11:30:07.246917 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ad75bb0-7f36-4ac3-b0a7-402237601802" containerName="nova-cell1-conductor-db-sync" Nov 28 11:30:07 crc kubenswrapper[4923]: I1128 11:30:07.247165 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="3ad75bb0-7f36-4ac3-b0a7-402237601802" containerName="nova-cell1-conductor-db-sync" Nov 28 11:30:07 crc kubenswrapper[4923]: I1128 11:30:07.247792 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 28 11:30:07 crc kubenswrapper[4923]: I1128 11:30:07.251372 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 28 11:30:07 crc kubenswrapper[4923]: I1128 11:30:07.265217 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 28 11:30:07 crc kubenswrapper[4923]: I1128 11:30:07.297382 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fa5948ea-11c1-4107-b068-e15ba465c8c6-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"fa5948ea-11c1-4107-b068-e15ba465c8c6\") " pod="openstack/nova-cell1-conductor-0" Nov 28 11:30:07 crc kubenswrapper[4923]: I1128 11:30:07.297862 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5pszg\" (UniqueName: \"kubernetes.io/projected/fa5948ea-11c1-4107-b068-e15ba465c8c6-kube-api-access-5pszg\") pod \"nova-cell1-conductor-0\" (UID: \"fa5948ea-11c1-4107-b068-e15ba465c8c6\") " pod="openstack/nova-cell1-conductor-0" Nov 28 11:30:07 crc kubenswrapper[4923]: I1128 11:30:07.298064 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fa5948ea-11c1-4107-b068-e15ba465c8c6-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"fa5948ea-11c1-4107-b068-e15ba465c8c6\") " pod="openstack/nova-cell1-conductor-0" Nov 28 11:30:07 crc kubenswrapper[4923]: I1128 11:30:07.399374 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fa5948ea-11c1-4107-b068-e15ba465c8c6-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"fa5948ea-11c1-4107-b068-e15ba465c8c6\") " pod="openstack/nova-cell1-conductor-0" Nov 28 11:30:07 crc kubenswrapper[4923]: I1128 11:30:07.399689 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fa5948ea-11c1-4107-b068-e15ba465c8c6-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"fa5948ea-11c1-4107-b068-e15ba465c8c6\") " pod="openstack/nova-cell1-conductor-0" Nov 28 11:30:07 crc kubenswrapper[4923]: I1128 11:30:07.399820 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5pszg\" (UniqueName: \"kubernetes.io/projected/fa5948ea-11c1-4107-b068-e15ba465c8c6-kube-api-access-5pszg\") pod \"nova-cell1-conductor-0\" (UID: \"fa5948ea-11c1-4107-b068-e15ba465c8c6\") " pod="openstack/nova-cell1-conductor-0" Nov 28 11:30:07 crc kubenswrapper[4923]: I1128 11:30:07.400536 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 11:30:07 crc kubenswrapper[4923]: I1128 11:30:07.406178 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fa5948ea-11c1-4107-b068-e15ba465c8c6-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"fa5948ea-11c1-4107-b068-e15ba465c8c6\") " pod="openstack/nova-cell1-conductor-0" Nov 28 11:30:07 crc kubenswrapper[4923]: I1128 11:30:07.407624 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fa5948ea-11c1-4107-b068-e15ba465c8c6-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"fa5948ea-11c1-4107-b068-e15ba465c8c6\") " pod="openstack/nova-cell1-conductor-0" Nov 28 11:30:07 crc kubenswrapper[4923]: I1128 11:30:07.425578 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5pszg\" (UniqueName: \"kubernetes.io/projected/fa5948ea-11c1-4107-b068-e15ba465c8c6-kube-api-access-5pszg\") pod \"nova-cell1-conductor-0\" (UID: \"fa5948ea-11c1-4107-b068-e15ba465c8c6\") " pod="openstack/nova-cell1-conductor-0" Nov 28 11:30:07 crc kubenswrapper[4923]: I1128 11:30:07.502906 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-686mb\" (UniqueName: \"kubernetes.io/projected/d5b84f3f-7303-45e3-8785-391209a4b7d7-kube-api-access-686mb\") pod \"d5b84f3f-7303-45e3-8785-391209a4b7d7\" (UID: \"d5b84f3f-7303-45e3-8785-391209a4b7d7\") " Nov 28 11:30:07 crc kubenswrapper[4923]: I1128 11:30:07.502964 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d5b84f3f-7303-45e3-8785-391209a4b7d7-config-data\") pod \"d5b84f3f-7303-45e3-8785-391209a4b7d7\" (UID: \"d5b84f3f-7303-45e3-8785-391209a4b7d7\") " Nov 28 11:30:07 crc kubenswrapper[4923]: I1128 11:30:07.503007 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d5b84f3f-7303-45e3-8785-391209a4b7d7-combined-ca-bundle\") pod \"d5b84f3f-7303-45e3-8785-391209a4b7d7\" (UID: \"d5b84f3f-7303-45e3-8785-391209a4b7d7\") " Nov 28 11:30:07 crc kubenswrapper[4923]: I1128 11:30:07.512965 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d5b84f3f-7303-45e3-8785-391209a4b7d7-kube-api-access-686mb" (OuterVolumeSpecName: "kube-api-access-686mb") pod "d5b84f3f-7303-45e3-8785-391209a4b7d7" (UID: "d5b84f3f-7303-45e3-8785-391209a4b7d7"). InnerVolumeSpecName "kube-api-access-686mb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:30:07 crc kubenswrapper[4923]: I1128 11:30:07.545430 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d5b84f3f-7303-45e3-8785-391209a4b7d7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d5b84f3f-7303-45e3-8785-391209a4b7d7" (UID: "d5b84f3f-7303-45e3-8785-391209a4b7d7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:30:07 crc kubenswrapper[4923]: I1128 11:30:07.549326 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d5b84f3f-7303-45e3-8785-391209a4b7d7-config-data" (OuterVolumeSpecName: "config-data") pod "d5b84f3f-7303-45e3-8785-391209a4b7d7" (UID: "d5b84f3f-7303-45e3-8785-391209a4b7d7"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:30:07 crc kubenswrapper[4923]: I1128 11:30:07.605321 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-686mb\" (UniqueName: \"kubernetes.io/projected/d5b84f3f-7303-45e3-8785-391209a4b7d7-kube-api-access-686mb\") on node \"crc\" DevicePath \"\"" Nov 28 11:30:07 crc kubenswrapper[4923]: I1128 11:30:07.605549 4923 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d5b84f3f-7303-45e3-8785-391209a4b7d7-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 11:30:07 crc kubenswrapper[4923]: I1128 11:30:07.605628 4923 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d5b84f3f-7303-45e3-8785-391209a4b7d7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 11:30:07 crc kubenswrapper[4923]: I1128 11:30:07.699614 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 28 11:30:08 crc kubenswrapper[4923]: I1128 11:30:08.199006 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 28 11:30:08 crc kubenswrapper[4923]: W1128 11:30:08.206836 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfa5948ea_11c1_4107_b068_e15ba465c8c6.slice/crio-9d85dbd515b1711c18c4575482948850696794154e5ed53745588ddfdd4b81b3 WatchSource:0}: Error finding container 9d85dbd515b1711c18c4575482948850696794154e5ed53745588ddfdd4b81b3: Status 404 returned error can't find the container with id 9d85dbd515b1711c18c4575482948850696794154e5ed53745588ddfdd4b81b3 Nov 28 11:30:08 crc kubenswrapper[4923]: I1128 11:30:08.250772 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"57d1e985-8768-421e-910e-a65c632dd5d3","Type":"ContainerStarted","Data":"c30246725eaff2c585e36ce51e3b0eab9975a8a35f7258d575e8082ab1ae35d7"} Nov 28 11:30:08 crc kubenswrapper[4923]: I1128 11:30:08.251046 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"57d1e985-8768-421e-910e-a65c632dd5d3","Type":"ContainerStarted","Data":"7f54048d653272cb4fc96fe294624a60df9fc6909fa83caa550835186270ba20"} Nov 28 11:30:08 crc kubenswrapper[4923]: I1128 11:30:08.252102 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"d5b84f3f-7303-45e3-8785-391209a4b7d7","Type":"ContainerDied","Data":"812c0473fc9f177cc0018c89540347e25239d05e5fb36076f453c2458e0c71c7"} Nov 28 11:30:08 crc kubenswrapper[4923]: I1128 11:30:08.252145 4923 scope.go:117] "RemoveContainer" containerID="e7519e7865c0c1b3488e1fb36233b18ea9d2b2c3b11dd33cb4721b2cd319ade6" Nov 28 11:30:08 crc kubenswrapper[4923]: I1128 11:30:08.252214 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 11:30:08 crc kubenswrapper[4923]: I1128 11:30:08.257139 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"fa5948ea-11c1-4107-b068-e15ba465c8c6","Type":"ContainerStarted","Data":"9d85dbd515b1711c18c4575482948850696794154e5ed53745588ddfdd4b81b3"} Nov 28 11:30:08 crc kubenswrapper[4923]: I1128 11:30:08.296251 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.296231238 podStartE2EDuration="2.296231238s" podCreationTimestamp="2025-11-28 11:30:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:30:08.266707781 +0000 UTC m=+1287.395391991" watchObservedRunningTime="2025-11-28 11:30:08.296231238 +0000 UTC m=+1287.424915448" Nov 28 11:30:08 crc kubenswrapper[4923]: I1128 11:30:08.392998 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 11:30:08 crc kubenswrapper[4923]: I1128 11:30:08.402574 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 11:30:08 crc kubenswrapper[4923]: I1128 11:30:08.411357 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 11:30:08 crc kubenswrapper[4923]: E1128 11:30:08.411688 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d5b84f3f-7303-45e3-8785-391209a4b7d7" containerName="nova-scheduler-scheduler" Nov 28 11:30:08 crc kubenswrapper[4923]: I1128 11:30:08.411705 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="d5b84f3f-7303-45e3-8785-391209a4b7d7" containerName="nova-scheduler-scheduler" Nov 28 11:30:08 crc kubenswrapper[4923]: I1128 11:30:08.411882 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="d5b84f3f-7303-45e3-8785-391209a4b7d7" containerName="nova-scheduler-scheduler" Nov 28 11:30:08 crc kubenswrapper[4923]: I1128 11:30:08.412477 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 11:30:08 crc kubenswrapper[4923]: I1128 11:30:08.418560 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 28 11:30:08 crc kubenswrapper[4923]: I1128 11:30:08.426853 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 11:30:08 crc kubenswrapper[4923]: I1128 11:30:08.552290 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x6gw4\" (UniqueName: \"kubernetes.io/projected/e122279d-7bf5-41e7-b4c7-6c2edbdbe508-kube-api-access-x6gw4\") pod \"nova-scheduler-0\" (UID: \"e122279d-7bf5-41e7-b4c7-6c2edbdbe508\") " pod="openstack/nova-scheduler-0" Nov 28 11:30:08 crc kubenswrapper[4923]: I1128 11:30:08.552366 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e122279d-7bf5-41e7-b4c7-6c2edbdbe508-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"e122279d-7bf5-41e7-b4c7-6c2edbdbe508\") " pod="openstack/nova-scheduler-0" Nov 28 11:30:08 crc kubenswrapper[4923]: I1128 11:30:08.552438 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e122279d-7bf5-41e7-b4c7-6c2edbdbe508-config-data\") pod \"nova-scheduler-0\" (UID: \"e122279d-7bf5-41e7-b4c7-6c2edbdbe508\") " pod="openstack/nova-scheduler-0" Nov 28 11:30:08 crc kubenswrapper[4923]: I1128 11:30:08.653736 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x6gw4\" (UniqueName: \"kubernetes.io/projected/e122279d-7bf5-41e7-b4c7-6c2edbdbe508-kube-api-access-x6gw4\") pod \"nova-scheduler-0\" (UID: \"e122279d-7bf5-41e7-b4c7-6c2edbdbe508\") " pod="openstack/nova-scheduler-0" Nov 28 11:30:08 crc kubenswrapper[4923]: I1128 11:30:08.653858 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e122279d-7bf5-41e7-b4c7-6c2edbdbe508-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"e122279d-7bf5-41e7-b4c7-6c2edbdbe508\") " pod="openstack/nova-scheduler-0" Nov 28 11:30:08 crc kubenswrapper[4923]: I1128 11:30:08.653917 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e122279d-7bf5-41e7-b4c7-6c2edbdbe508-config-data\") pod \"nova-scheduler-0\" (UID: \"e122279d-7bf5-41e7-b4c7-6c2edbdbe508\") " pod="openstack/nova-scheduler-0" Nov 28 11:30:08 crc kubenswrapper[4923]: I1128 11:30:08.665660 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e122279d-7bf5-41e7-b4c7-6c2edbdbe508-config-data\") pod \"nova-scheduler-0\" (UID: \"e122279d-7bf5-41e7-b4c7-6c2edbdbe508\") " pod="openstack/nova-scheduler-0" Nov 28 11:30:08 crc kubenswrapper[4923]: I1128 11:30:08.667293 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e122279d-7bf5-41e7-b4c7-6c2edbdbe508-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"e122279d-7bf5-41e7-b4c7-6c2edbdbe508\") " pod="openstack/nova-scheduler-0" Nov 28 11:30:08 crc kubenswrapper[4923]: I1128 11:30:08.684945 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x6gw4\" (UniqueName: 
\"kubernetes.io/projected/e122279d-7bf5-41e7-b4c7-6c2edbdbe508-kube-api-access-x6gw4\") pod \"nova-scheduler-0\" (UID: \"e122279d-7bf5-41e7-b4c7-6c2edbdbe508\") " pod="openstack/nova-scheduler-0" Nov 28 11:30:08 crc kubenswrapper[4923]: I1128 11:30:08.729882 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 11:30:09 crc kubenswrapper[4923]: I1128 11:30:09.226785 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d5b84f3f-7303-45e3-8785-391209a4b7d7" path="/var/lib/kubelet/pods/d5b84f3f-7303-45e3-8785-391209a4b7d7/volumes" Nov 28 11:30:09 crc kubenswrapper[4923]: W1128 11:30:09.263662 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode122279d_7bf5_41e7_b4c7_6c2edbdbe508.slice/crio-8b686b1127a84d66631038f074163c744370a5b9c5f15b9c0b8e01ee1d9e36b6 WatchSource:0}: Error finding container 8b686b1127a84d66631038f074163c744370a5b9c5f15b9c0b8e01ee1d9e36b6: Status 404 returned error can't find the container with id 8b686b1127a84d66631038f074163c744370a5b9c5f15b9c0b8e01ee1d9e36b6 Nov 28 11:30:09 crc kubenswrapper[4923]: I1128 11:30:09.268405 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 11:30:09 crc kubenswrapper[4923]: I1128 11:30:09.272464 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"fa5948ea-11c1-4107-b068-e15ba465c8c6","Type":"ContainerStarted","Data":"f62227e14316ac99f0fc406453bea1f325924c4eb13a29d12d95c4a7adc1e079"} Nov 28 11:30:09 crc kubenswrapper[4923]: I1128 11:30:09.272832 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0" Nov 28 11:30:09 crc kubenswrapper[4923]: I1128 11:30:09.302990 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=2.30289524 podStartE2EDuration="2.30289524s" podCreationTimestamp="2025-11-28 11:30:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:30:09.296974864 +0000 UTC m=+1288.425659074" watchObservedRunningTime="2025-11-28 11:30:09.30289524 +0000 UTC m=+1288.431579460" Nov 28 11:30:10 crc kubenswrapper[4923]: I1128 11:30:10.291201 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"e122279d-7bf5-41e7-b4c7-6c2edbdbe508","Type":"ContainerStarted","Data":"ab9b856988843d8eb2836c4eb74a23637769b564e01892f542c17fa625c03c19"} Nov 28 11:30:10 crc kubenswrapper[4923]: I1128 11:30:10.292232 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"e122279d-7bf5-41e7-b4c7-6c2edbdbe508","Type":"ContainerStarted","Data":"8b686b1127a84d66631038f074163c744370a5b9c5f15b9c0b8e01ee1d9e36b6"} Nov 28 11:30:10 crc kubenswrapper[4923]: I1128 11:30:10.308417 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.308386698 podStartE2EDuration="2.308386698s" podCreationTimestamp="2025-11-28 11:30:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:30:10.304385935 +0000 UTC m=+1289.433070145" watchObservedRunningTime="2025-11-28 11:30:10.308386698 +0000 UTC m=+1289.437070908" Nov 28 11:30:11 crc 
kubenswrapper[4923]: I1128 11:30:11.144353 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 11:30:11 crc kubenswrapper[4923]: I1128 11:30:11.215650 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fb5f755a-29cb-453a-b61d-288cea6bba2e-config-data\") pod \"fb5f755a-29cb-453a-b61d-288cea6bba2e\" (UID: \"fb5f755a-29cb-453a-b61d-288cea6bba2e\") " Nov 28 11:30:11 crc kubenswrapper[4923]: I1128 11:30:11.215921 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fb5f755a-29cb-453a-b61d-288cea6bba2e-logs\") pod \"fb5f755a-29cb-453a-b61d-288cea6bba2e\" (UID: \"fb5f755a-29cb-453a-b61d-288cea6bba2e\") " Nov 28 11:30:11 crc kubenswrapper[4923]: I1128 11:30:11.216028 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kwt4z\" (UniqueName: \"kubernetes.io/projected/fb5f755a-29cb-453a-b61d-288cea6bba2e-kube-api-access-kwt4z\") pod \"fb5f755a-29cb-453a-b61d-288cea6bba2e\" (UID: \"fb5f755a-29cb-453a-b61d-288cea6bba2e\") " Nov 28 11:30:11 crc kubenswrapper[4923]: I1128 11:30:11.216206 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fb5f755a-29cb-453a-b61d-288cea6bba2e-combined-ca-bundle\") pod \"fb5f755a-29cb-453a-b61d-288cea6bba2e\" (UID: \"fb5f755a-29cb-453a-b61d-288cea6bba2e\") " Nov 28 11:30:11 crc kubenswrapper[4923]: I1128 11:30:11.216567 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fb5f755a-29cb-453a-b61d-288cea6bba2e-logs" (OuterVolumeSpecName: "logs") pod "fb5f755a-29cb-453a-b61d-288cea6bba2e" (UID: "fb5f755a-29cb-453a-b61d-288cea6bba2e"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:30:11 crc kubenswrapper[4923]: I1128 11:30:11.220573 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fb5f755a-29cb-453a-b61d-288cea6bba2e-kube-api-access-kwt4z" (OuterVolumeSpecName: "kube-api-access-kwt4z") pod "fb5f755a-29cb-453a-b61d-288cea6bba2e" (UID: "fb5f755a-29cb-453a-b61d-288cea6bba2e"). InnerVolumeSpecName "kube-api-access-kwt4z". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:30:11 crc kubenswrapper[4923]: I1128 11:30:11.247575 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fb5f755a-29cb-453a-b61d-288cea6bba2e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "fb5f755a-29cb-453a-b61d-288cea6bba2e" (UID: "fb5f755a-29cb-453a-b61d-288cea6bba2e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:30:11 crc kubenswrapper[4923]: I1128 11:30:11.249646 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fb5f755a-29cb-453a-b61d-288cea6bba2e-config-data" (OuterVolumeSpecName: "config-data") pod "fb5f755a-29cb-453a-b61d-288cea6bba2e" (UID: "fb5f755a-29cb-453a-b61d-288cea6bba2e"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:30:11 crc kubenswrapper[4923]: I1128 11:30:11.303505 4923 generic.go:334] "Generic (PLEG): container finished" podID="fb5f755a-29cb-453a-b61d-288cea6bba2e" containerID="778106caa588d36abba6cfc29648e8eb84aafbc46b21d47472b47df3a30a1a57" exitCode=0 Nov 28 11:30:11 crc kubenswrapper[4923]: I1128 11:30:11.304463 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 11:30:11 crc kubenswrapper[4923]: I1128 11:30:11.314211 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"fb5f755a-29cb-453a-b61d-288cea6bba2e","Type":"ContainerDied","Data":"778106caa588d36abba6cfc29648e8eb84aafbc46b21d47472b47df3a30a1a57"} Nov 28 11:30:11 crc kubenswrapper[4923]: I1128 11:30:11.314318 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"fb5f755a-29cb-453a-b61d-288cea6bba2e","Type":"ContainerDied","Data":"5bb85132126497c6f6e8ce12b0cda98ad59c895d3f5dd24f554ab444d7d8f5d4"} Nov 28 11:30:11 crc kubenswrapper[4923]: I1128 11:30:11.314360 4923 scope.go:117] "RemoveContainer" containerID="778106caa588d36abba6cfc29648e8eb84aafbc46b21d47472b47df3a30a1a57" Nov 28 11:30:11 crc kubenswrapper[4923]: I1128 11:30:11.317891 4923 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fb5f755a-29cb-453a-b61d-288cea6bba2e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 11:30:11 crc kubenswrapper[4923]: I1128 11:30:11.317922 4923 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fb5f755a-29cb-453a-b61d-288cea6bba2e-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 11:30:11 crc kubenswrapper[4923]: I1128 11:30:11.317950 4923 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fb5f755a-29cb-453a-b61d-288cea6bba2e-logs\") on node \"crc\" DevicePath \"\"" Nov 28 11:30:11 crc kubenswrapper[4923]: I1128 11:30:11.317963 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kwt4z\" (UniqueName: \"kubernetes.io/projected/fb5f755a-29cb-453a-b61d-288cea6bba2e-kube-api-access-kwt4z\") on node \"crc\" DevicePath \"\"" Nov 28 11:30:11 crc kubenswrapper[4923]: I1128 11:30:11.339726 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 28 11:30:11 crc kubenswrapper[4923]: I1128 11:30:11.350974 4923 scope.go:117] "RemoveContainer" containerID="923ca8eae6f7293f8ce238aacd5c1f3c26e1fd039b6d5b9a9b8f9803d297e562" Nov 28 11:30:11 crc kubenswrapper[4923]: I1128 11:30:11.396050 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 28 11:30:11 crc kubenswrapper[4923]: I1128 11:30:11.420834 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 28 11:30:11 crc kubenswrapper[4923]: E1128 11:30:11.423140 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb5f755a-29cb-453a-b61d-288cea6bba2e" containerName="nova-api-log" Nov 28 11:30:11 crc kubenswrapper[4923]: I1128 11:30:11.423219 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb5f755a-29cb-453a-b61d-288cea6bba2e" containerName="nova-api-log" Nov 28 11:30:11 crc kubenswrapper[4923]: E1128 11:30:11.423324 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb5f755a-29cb-453a-b61d-288cea6bba2e" containerName="nova-api-api" Nov 28 11:30:11 crc 
kubenswrapper[4923]: I1128 11:30:11.423382 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb5f755a-29cb-453a-b61d-288cea6bba2e" containerName="nova-api-api" Nov 28 11:30:11 crc kubenswrapper[4923]: I1128 11:30:11.423605 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="fb5f755a-29cb-453a-b61d-288cea6bba2e" containerName="nova-api-api" Nov 28 11:30:11 crc kubenswrapper[4923]: I1128 11:30:11.423966 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="fb5f755a-29cb-453a-b61d-288cea6bba2e" containerName="nova-api-log" Nov 28 11:30:11 crc kubenswrapper[4923]: I1128 11:30:11.425088 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 11:30:11 crc kubenswrapper[4923]: I1128 11:30:11.425242 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 11:30:11 crc kubenswrapper[4923]: I1128 11:30:11.428547 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 28 11:30:11 crc kubenswrapper[4923]: I1128 11:30:11.437518 4923 scope.go:117] "RemoveContainer" containerID="778106caa588d36abba6cfc29648e8eb84aafbc46b21d47472b47df3a30a1a57" Nov 28 11:30:11 crc kubenswrapper[4923]: E1128 11:30:11.438444 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"778106caa588d36abba6cfc29648e8eb84aafbc46b21d47472b47df3a30a1a57\": container with ID starting with 778106caa588d36abba6cfc29648e8eb84aafbc46b21d47472b47df3a30a1a57 not found: ID does not exist" containerID="778106caa588d36abba6cfc29648e8eb84aafbc46b21d47472b47df3a30a1a57" Nov 28 11:30:11 crc kubenswrapper[4923]: I1128 11:30:11.438479 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"778106caa588d36abba6cfc29648e8eb84aafbc46b21d47472b47df3a30a1a57"} err="failed to get container status \"778106caa588d36abba6cfc29648e8eb84aafbc46b21d47472b47df3a30a1a57\": rpc error: code = NotFound desc = could not find container \"778106caa588d36abba6cfc29648e8eb84aafbc46b21d47472b47df3a30a1a57\": container with ID starting with 778106caa588d36abba6cfc29648e8eb84aafbc46b21d47472b47df3a30a1a57 not found: ID does not exist" Nov 28 11:30:11 crc kubenswrapper[4923]: I1128 11:30:11.438502 4923 scope.go:117] "RemoveContainer" containerID="923ca8eae6f7293f8ce238aacd5c1f3c26e1fd039b6d5b9a9b8f9803d297e562" Nov 28 11:30:11 crc kubenswrapper[4923]: E1128 11:30:11.442089 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"923ca8eae6f7293f8ce238aacd5c1f3c26e1fd039b6d5b9a9b8f9803d297e562\": container with ID starting with 923ca8eae6f7293f8ce238aacd5c1f3c26e1fd039b6d5b9a9b8f9803d297e562 not found: ID does not exist" containerID="923ca8eae6f7293f8ce238aacd5c1f3c26e1fd039b6d5b9a9b8f9803d297e562" Nov 28 11:30:11 crc kubenswrapper[4923]: I1128 11:30:11.442119 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"923ca8eae6f7293f8ce238aacd5c1f3c26e1fd039b6d5b9a9b8f9803d297e562"} err="failed to get container status \"923ca8eae6f7293f8ce238aacd5c1f3c26e1fd039b6d5b9a9b8f9803d297e562\": rpc error: code = NotFound desc = could not find container \"923ca8eae6f7293f8ce238aacd5c1f3c26e1fd039b6d5b9a9b8f9803d297e562\": container with ID starting with 923ca8eae6f7293f8ce238aacd5c1f3c26e1fd039b6d5b9a9b8f9803d297e562 not found: ID does not exist" Nov 28 11:30:11 crc 
kubenswrapper[4923]: I1128 11:30:11.523714 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c0cabae9-7237-43fb-b966-8f59d437c495-config-data\") pod \"nova-api-0\" (UID: \"c0cabae9-7237-43fb-b966-8f59d437c495\") " pod="openstack/nova-api-0" Nov 28 11:30:11 crc kubenswrapper[4923]: I1128 11:30:11.524196 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c0cabae9-7237-43fb-b966-8f59d437c495-logs\") pod \"nova-api-0\" (UID: \"c0cabae9-7237-43fb-b966-8f59d437c495\") " pod="openstack/nova-api-0" Nov 28 11:30:11 crc kubenswrapper[4923]: I1128 11:30:11.524290 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rvlx4\" (UniqueName: \"kubernetes.io/projected/c0cabae9-7237-43fb-b966-8f59d437c495-kube-api-access-rvlx4\") pod \"nova-api-0\" (UID: \"c0cabae9-7237-43fb-b966-8f59d437c495\") " pod="openstack/nova-api-0" Nov 28 11:30:11 crc kubenswrapper[4923]: I1128 11:30:11.524422 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c0cabae9-7237-43fb-b966-8f59d437c495-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"c0cabae9-7237-43fb-b966-8f59d437c495\") " pod="openstack/nova-api-0" Nov 28 11:30:11 crc kubenswrapper[4923]: I1128 11:30:11.560454 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 28 11:30:11 crc kubenswrapper[4923]: I1128 11:30:11.560589 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 28 11:30:11 crc kubenswrapper[4923]: I1128 11:30:11.625834 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c0cabae9-7237-43fb-b966-8f59d437c495-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"c0cabae9-7237-43fb-b966-8f59d437c495\") " pod="openstack/nova-api-0" Nov 28 11:30:11 crc kubenswrapper[4923]: I1128 11:30:11.625924 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c0cabae9-7237-43fb-b966-8f59d437c495-config-data\") pod \"nova-api-0\" (UID: \"c0cabae9-7237-43fb-b966-8f59d437c495\") " pod="openstack/nova-api-0" Nov 28 11:30:11 crc kubenswrapper[4923]: I1128 11:30:11.626110 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c0cabae9-7237-43fb-b966-8f59d437c495-logs\") pod \"nova-api-0\" (UID: \"c0cabae9-7237-43fb-b966-8f59d437c495\") " pod="openstack/nova-api-0" Nov 28 11:30:11 crc kubenswrapper[4923]: I1128 11:30:11.626857 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c0cabae9-7237-43fb-b966-8f59d437c495-logs\") pod \"nova-api-0\" (UID: \"c0cabae9-7237-43fb-b966-8f59d437c495\") " pod="openstack/nova-api-0" Nov 28 11:30:11 crc kubenswrapper[4923]: I1128 11:30:11.626966 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rvlx4\" (UniqueName: \"kubernetes.io/projected/c0cabae9-7237-43fb-b966-8f59d437c495-kube-api-access-rvlx4\") pod \"nova-api-0\" (UID: \"c0cabae9-7237-43fb-b966-8f59d437c495\") " pod="openstack/nova-api-0" Nov 28 11:30:11 crc kubenswrapper[4923]: 
I1128 11:30:11.631068 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c0cabae9-7237-43fb-b966-8f59d437c495-config-data\") pod \"nova-api-0\" (UID: \"c0cabae9-7237-43fb-b966-8f59d437c495\") " pod="openstack/nova-api-0" Nov 28 11:30:11 crc kubenswrapper[4923]: I1128 11:30:11.632085 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c0cabae9-7237-43fb-b966-8f59d437c495-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"c0cabae9-7237-43fb-b966-8f59d437c495\") " pod="openstack/nova-api-0" Nov 28 11:30:11 crc kubenswrapper[4923]: I1128 11:30:11.653809 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rvlx4\" (UniqueName: \"kubernetes.io/projected/c0cabae9-7237-43fb-b966-8f59d437c495-kube-api-access-rvlx4\") pod \"nova-api-0\" (UID: \"c0cabae9-7237-43fb-b966-8f59d437c495\") " pod="openstack/nova-api-0" Nov 28 11:30:11 crc kubenswrapper[4923]: I1128 11:30:11.753302 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 11:30:12 crc kubenswrapper[4923]: I1128 11:30:12.086034 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 11:30:12 crc kubenswrapper[4923]: W1128 11:30:12.090156 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc0cabae9_7237_43fb_b966_8f59d437c495.slice/crio-e6944f7fb6b71b1c16ea7a05da873ec8c66b5607c4e92680069fd891b090ede1 WatchSource:0}: Error finding container e6944f7fb6b71b1c16ea7a05da873ec8c66b5607c4e92680069fd891b090ede1: Status 404 returned error can't find the container with id e6944f7fb6b71b1c16ea7a05da873ec8c66b5607c4e92680069fd891b090ede1 Nov 28 11:30:12 crc kubenswrapper[4923]: I1128 11:30:12.314614 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"c0cabae9-7237-43fb-b966-8f59d437c495","Type":"ContainerStarted","Data":"0f361de299348857d212d19738ca486b4f5291bdcc90e07d0c5faa113c21ad8b"} Nov 28 11:30:12 crc kubenswrapper[4923]: I1128 11:30:12.314656 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"c0cabae9-7237-43fb-b966-8f59d437c495","Type":"ContainerStarted","Data":"e6944f7fb6b71b1c16ea7a05da873ec8c66b5607c4e92680069fd891b090ede1"} Nov 28 11:30:13 crc kubenswrapper[4923]: I1128 11:30:13.187126 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fb5f755a-29cb-453a-b61d-288cea6bba2e" path="/var/lib/kubelet/pods/fb5f755a-29cb-453a-b61d-288cea6bba2e/volumes" Nov 28 11:30:13 crc kubenswrapper[4923]: I1128 11:30:13.328109 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"c0cabae9-7237-43fb-b966-8f59d437c495","Type":"ContainerStarted","Data":"aebe39e0d037dc30e75e687afa6cf7f7f7ee9cbc01f18bf2333b48dd274d7e95"} Nov 28 11:30:13 crc kubenswrapper[4923]: I1128 11:30:13.359102 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.3590759119999998 podStartE2EDuration="2.359075912s" podCreationTimestamp="2025-11-28 11:30:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:30:13.350858572 +0000 UTC m=+1292.479542822" watchObservedRunningTime="2025-11-28 11:30:13.359075912 +0000 UTC 
m=+1292.487760162" Nov 28 11:30:13 crc kubenswrapper[4923]: I1128 11:30:13.731646 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 28 11:30:16 crc kubenswrapper[4923]: I1128 11:30:16.560856 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 28 11:30:16 crc kubenswrapper[4923]: I1128 11:30:16.561469 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 28 11:30:17 crc kubenswrapper[4923]: I1128 11:30:17.576554 4923 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="57d1e985-8768-421e-910e-a65c632dd5d3" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.176:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 28 11:30:17 crc kubenswrapper[4923]: I1128 11:30:17.576570 4923 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="57d1e985-8768-421e-910e-a65c632dd5d3" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.176:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 28 11:30:17 crc kubenswrapper[4923]: I1128 11:30:17.747971 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0" Nov 28 11:30:18 crc kubenswrapper[4923]: I1128 11:30:18.731294 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 28 11:30:18 crc kubenswrapper[4923]: I1128 11:30:18.754816 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 28 11:30:19 crc kubenswrapper[4923]: I1128 11:30:19.445091 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 28 11:30:21 crc kubenswrapper[4923]: I1128 11:30:21.754132 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 28 11:30:21 crc kubenswrapper[4923]: I1128 11:30:21.754211 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 28 11:30:22 crc kubenswrapper[4923]: I1128 11:30:22.837120 4923 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="c0cabae9-7237-43fb-b966-8f59d437c495" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.179:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 28 11:30:22 crc kubenswrapper[4923]: I1128 11:30:22.837158 4923 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="c0cabae9-7237-43fb-b966-8f59d437c495" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.179:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 28 11:30:26 crc kubenswrapper[4923]: I1128 11:30:26.566691 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 28 11:30:26 crc kubenswrapper[4923]: I1128 11:30:26.569653 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 28 11:30:26 crc kubenswrapper[4923]: I1128 11:30:26.577929 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 28 11:30:27 crc 
kubenswrapper[4923]: I1128 11:30:27.502300 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 28 11:30:29 crc kubenswrapper[4923]: I1128 11:30:29.303532 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 28 11:30:29 crc kubenswrapper[4923]: I1128 11:30:29.418501 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 28 11:30:29 crc kubenswrapper[4923]: I1128 11:30:29.461645 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ddbc46aa-3e1f-4c6c-8549-73107fd2d5cc-config-data\") pod \"ddbc46aa-3e1f-4c6c-8549-73107fd2d5cc\" (UID: \"ddbc46aa-3e1f-4c6c-8549-73107fd2d5cc\") " Nov 28 11:30:29 crc kubenswrapper[4923]: I1128 11:30:29.461781 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m2drt\" (UniqueName: \"kubernetes.io/projected/ddbc46aa-3e1f-4c6c-8549-73107fd2d5cc-kube-api-access-m2drt\") pod \"ddbc46aa-3e1f-4c6c-8549-73107fd2d5cc\" (UID: \"ddbc46aa-3e1f-4c6c-8549-73107fd2d5cc\") " Nov 28 11:30:29 crc kubenswrapper[4923]: I1128 11:30:29.461920 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ddbc46aa-3e1f-4c6c-8549-73107fd2d5cc-combined-ca-bundle\") pod \"ddbc46aa-3e1f-4c6c-8549-73107fd2d5cc\" (UID: \"ddbc46aa-3e1f-4c6c-8549-73107fd2d5cc\") " Nov 28 11:30:29 crc kubenswrapper[4923]: I1128 11:30:29.492524 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ddbc46aa-3e1f-4c6c-8549-73107fd2d5cc-kube-api-access-m2drt" (OuterVolumeSpecName: "kube-api-access-m2drt") pod "ddbc46aa-3e1f-4c6c-8549-73107fd2d5cc" (UID: "ddbc46aa-3e1f-4c6c-8549-73107fd2d5cc"). InnerVolumeSpecName "kube-api-access-m2drt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:30:29 crc kubenswrapper[4923]: I1128 11:30:29.507246 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ddbc46aa-3e1f-4c6c-8549-73107fd2d5cc-config-data" (OuterVolumeSpecName: "config-data") pod "ddbc46aa-3e1f-4c6c-8549-73107fd2d5cc" (UID: "ddbc46aa-3e1f-4c6c-8549-73107fd2d5cc"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:30:29 crc kubenswrapper[4923]: I1128 11:30:29.513803 4923 generic.go:334] "Generic (PLEG): container finished" podID="ddbc46aa-3e1f-4c6c-8549-73107fd2d5cc" containerID="5a7cc3ed0766ba4eae9f769a1cb4911fc67cf4338ee6dbc2f0fcaabdbbcf893d" exitCode=137 Nov 28 11:30:29 crc kubenswrapper[4923]: I1128 11:30:29.513860 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 28 11:30:29 crc kubenswrapper[4923]: I1128 11:30:29.513861 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"ddbc46aa-3e1f-4c6c-8549-73107fd2d5cc","Type":"ContainerDied","Data":"5a7cc3ed0766ba4eae9f769a1cb4911fc67cf4338ee6dbc2f0fcaabdbbcf893d"} Nov 28 11:30:29 crc kubenswrapper[4923]: I1128 11:30:29.513966 4923 scope.go:117] "RemoveContainer" containerID="5a7cc3ed0766ba4eae9f769a1cb4911fc67cf4338ee6dbc2f0fcaabdbbcf893d" Nov 28 11:30:29 crc kubenswrapper[4923]: I1128 11:30:29.513925 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"ddbc46aa-3e1f-4c6c-8549-73107fd2d5cc","Type":"ContainerDied","Data":"9322871af9967c3e75ae25a09085ec0dbd4911150a7b1b12733258a433455235"} Nov 28 11:30:29 crc kubenswrapper[4923]: I1128 11:30:29.522225 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ddbc46aa-3e1f-4c6c-8549-73107fd2d5cc-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ddbc46aa-3e1f-4c6c-8549-73107fd2d5cc" (UID: "ddbc46aa-3e1f-4c6c-8549-73107fd2d5cc"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:30:29 crc kubenswrapper[4923]: I1128 11:30:29.567149 4923 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ddbc46aa-3e1f-4c6c-8549-73107fd2d5cc-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 11:30:29 crc kubenswrapper[4923]: I1128 11:30:29.567194 4923 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ddbc46aa-3e1f-4c6c-8549-73107fd2d5cc-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 11:30:29 crc kubenswrapper[4923]: I1128 11:30:29.567212 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m2drt\" (UniqueName: \"kubernetes.io/projected/ddbc46aa-3e1f-4c6c-8549-73107fd2d5cc-kube-api-access-m2drt\") on node \"crc\" DevicePath \"\"" Nov 28 11:30:29 crc kubenswrapper[4923]: I1128 11:30:29.597982 4923 scope.go:117] "RemoveContainer" containerID="5a7cc3ed0766ba4eae9f769a1cb4911fc67cf4338ee6dbc2f0fcaabdbbcf893d" Nov 28 11:30:29 crc kubenswrapper[4923]: E1128 11:30:29.598771 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5a7cc3ed0766ba4eae9f769a1cb4911fc67cf4338ee6dbc2f0fcaabdbbcf893d\": container with ID starting with 5a7cc3ed0766ba4eae9f769a1cb4911fc67cf4338ee6dbc2f0fcaabdbbcf893d not found: ID does not exist" containerID="5a7cc3ed0766ba4eae9f769a1cb4911fc67cf4338ee6dbc2f0fcaabdbbcf893d" Nov 28 11:30:29 crc kubenswrapper[4923]: I1128 11:30:29.598825 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5a7cc3ed0766ba4eae9f769a1cb4911fc67cf4338ee6dbc2f0fcaabdbbcf893d"} err="failed to get container status \"5a7cc3ed0766ba4eae9f769a1cb4911fc67cf4338ee6dbc2f0fcaabdbbcf893d\": rpc error: code = NotFound desc = could not find container \"5a7cc3ed0766ba4eae9f769a1cb4911fc67cf4338ee6dbc2f0fcaabdbbcf893d\": container with ID starting with 5a7cc3ed0766ba4eae9f769a1cb4911fc67cf4338ee6dbc2f0fcaabdbbcf893d not found: ID does not exist" Nov 28 11:30:29 crc kubenswrapper[4923]: I1128 11:30:29.870775 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 11:30:29 crc 
Nov 28 11:30:29 crc kubenswrapper[4923]: I1128 11:30:29.893895 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Nov 28 11:30:29 crc kubenswrapper[4923]: E1128 11:30:29.894347 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ddbc46aa-3e1f-4c6c-8549-73107fd2d5cc" containerName="nova-cell1-novncproxy-novncproxy"
Nov 28 11:30:29 crc kubenswrapper[4923]: I1128 11:30:29.894370 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="ddbc46aa-3e1f-4c6c-8549-73107fd2d5cc" containerName="nova-cell1-novncproxy-novncproxy"
Nov 28 11:30:29 crc kubenswrapper[4923]: I1128 11:30:29.894614 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="ddbc46aa-3e1f-4c6c-8549-73107fd2d5cc" containerName="nova-cell1-novncproxy-novncproxy"
Nov 28 11:30:29 crc kubenswrapper[4923]: I1128 11:30:29.895331 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Nov 28 11:30:29 crc kubenswrapper[4923]: I1128 11:30:29.898442 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data"
Nov 28 11:30:29 crc kubenswrapper[4923]: I1128 11:30:29.902427 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt"
Nov 28 11:30:29 crc kubenswrapper[4923]: I1128 11:30:29.906109 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Nov 28 11:30:29 crc kubenswrapper[4923]: I1128 11:30:29.911684 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc"
Nov 28 11:30:30 crc kubenswrapper[4923]: I1128 11:30:30.075169 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/63d46d86-5ddc-4a18-a13a-a49cb248237a-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"63d46d86-5ddc-4a18-a13a-a49cb248237a\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 11:30:30 crc kubenswrapper[4923]: I1128 11:30:30.075272 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nf97w\" (UniqueName: \"kubernetes.io/projected/63d46d86-5ddc-4a18-a13a-a49cb248237a-kube-api-access-nf97w\") pod \"nova-cell1-novncproxy-0\" (UID: \"63d46d86-5ddc-4a18-a13a-a49cb248237a\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 11:30:30 crc kubenswrapper[4923]: I1128 11:30:30.075321 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/63d46d86-5ddc-4a18-a13a-a49cb248237a-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"63d46d86-5ddc-4a18-a13a-a49cb248237a\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 11:30:30 crc kubenswrapper[4923]: I1128 11:30:30.075375 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/63d46d86-5ddc-4a18-a13a-a49cb248237a-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"63d46d86-5ddc-4a18-a13a-a49cb248237a\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 11:30:30 crc kubenswrapper[4923]: I1128 11:30:30.075416 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/63d46d86-5ddc-4a18-a13a-a49cb248237a-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"63d46d86-5ddc-4a18-a13a-a49cb248237a\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 11:30:30 crc kubenswrapper[4923]: I1128 11:30:30.176678 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/63d46d86-5ddc-4a18-a13a-a49cb248237a-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"63d46d86-5ddc-4a18-a13a-a49cb248237a\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 11:30:30 crc kubenswrapper[4923]: I1128 11:30:30.176811 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/63d46d86-5ddc-4a18-a13a-a49cb248237a-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"63d46d86-5ddc-4a18-a13a-a49cb248237a\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 11:30:30 crc kubenswrapper[4923]: I1128 11:30:30.176927 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nf97w\" (UniqueName: \"kubernetes.io/projected/63d46d86-5ddc-4a18-a13a-a49cb248237a-kube-api-access-nf97w\") pod \"nova-cell1-novncproxy-0\" (UID: \"63d46d86-5ddc-4a18-a13a-a49cb248237a\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 11:30:30 crc kubenswrapper[4923]: I1128 11:30:30.177027 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/63d46d86-5ddc-4a18-a13a-a49cb248237a-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"63d46d86-5ddc-4a18-a13a-a49cb248237a\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 11:30:30 crc kubenswrapper[4923]: I1128 11:30:30.177106 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/63d46d86-5ddc-4a18-a13a-a49cb248237a-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"63d46d86-5ddc-4a18-a13a-a49cb248237a\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 11:30:30 crc kubenswrapper[4923]: I1128 11:30:30.195653 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/63d46d86-5ddc-4a18-a13a-a49cb248237a-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"63d46d86-5ddc-4a18-a13a-a49cb248237a\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 11:30:30 crc kubenswrapper[4923]: I1128 11:30:30.195807 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/63d46d86-5ddc-4a18-a13a-a49cb248237a-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"63d46d86-5ddc-4a18-a13a-a49cb248237a\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 11:30:30 crc kubenswrapper[4923]: I1128 11:30:30.196885 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/63d46d86-5ddc-4a18-a13a-a49cb248237a-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"63d46d86-5ddc-4a18-a13a-a49cb248237a\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 11:30:30 crc kubenswrapper[4923]: I1128 11:30:30.200684 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/63d46d86-5ddc-4a18-a13a-a49cb248237a-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"63d46d86-5ddc-4a18-a13a-a49cb248237a\") " pod="openstack/nova-cell1-novncproxy-0"
\"kubernetes.io/secret/63d46d86-5ddc-4a18-a13a-a49cb248237a-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"63d46d86-5ddc-4a18-a13a-a49cb248237a\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 11:30:30 crc kubenswrapper[4923]: I1128 11:30:30.215405 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nf97w\" (UniqueName: \"kubernetes.io/projected/63d46d86-5ddc-4a18-a13a-a49cb248237a-kube-api-access-nf97w\") pod \"nova-cell1-novncproxy-0\" (UID: \"63d46d86-5ddc-4a18-a13a-a49cb248237a\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 11:30:30 crc kubenswrapper[4923]: I1128 11:30:30.221980 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 28 11:30:30 crc kubenswrapper[4923]: I1128 11:30:30.711648 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 11:30:30 crc kubenswrapper[4923]: W1128 11:30:30.728126 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod63d46d86_5ddc_4a18_a13a_a49cb248237a.slice/crio-35088eb620e01c851b8e7129eb282ec8f48b1bea0a0540aac9e40c7a78a8567a WatchSource:0}: Error finding container 35088eb620e01c851b8e7129eb282ec8f48b1bea0a0540aac9e40c7a78a8567a: Status 404 returned error can't find the container with id 35088eb620e01c851b8e7129eb282ec8f48b1bea0a0540aac9e40c7a78a8567a Nov 28 11:30:31 crc kubenswrapper[4923]: I1128 11:30:31.184114 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ddbc46aa-3e1f-4c6c-8549-73107fd2d5cc" path="/var/lib/kubelet/pods/ddbc46aa-3e1f-4c6c-8549-73107fd2d5cc/volumes" Nov 28 11:30:31 crc kubenswrapper[4923]: I1128 11:30:31.534682 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"63d46d86-5ddc-4a18-a13a-a49cb248237a","Type":"ContainerStarted","Data":"aa19b4ddf21fecbc78bf250edb0e3539de5d1fa6f477dcfca55750796c12b0a7"} Nov 28 11:30:31 crc kubenswrapper[4923]: I1128 11:30:31.534763 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"63d46d86-5ddc-4a18-a13a-a49cb248237a","Type":"ContainerStarted","Data":"35088eb620e01c851b8e7129eb282ec8f48b1bea0a0540aac9e40c7a78a8567a"} Nov 28 11:30:31 crc kubenswrapper[4923]: I1128 11:30:31.558887 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.5588679819999998 podStartE2EDuration="2.558867982s" podCreationTimestamp="2025-11-28 11:30:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:30:31.55272501 +0000 UTC m=+1310.681409230" watchObservedRunningTime="2025-11-28 11:30:31.558867982 +0000 UTC m=+1310.687552212" Nov 28 11:30:31 crc kubenswrapper[4923]: I1128 11:30:31.758980 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 28 11:30:31 crc kubenswrapper[4923]: I1128 11:30:31.759674 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 28 11:30:31 crc kubenswrapper[4923]: I1128 11:30:31.762171 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 28 11:30:31 crc kubenswrapper[4923]: I1128 11:30:31.767265 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack/nova-api-0" Nov 28 11:30:32 crc kubenswrapper[4923]: I1128 11:30:32.558499 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 28 11:30:32 crc kubenswrapper[4923]: I1128 11:30:32.563717 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 28 11:30:32 crc kubenswrapper[4923]: I1128 11:30:32.784412 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5b856c5697-rqrzr"] Nov 28 11:30:32 crc kubenswrapper[4923]: I1128 11:30:32.786241 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5b856c5697-rqrzr" Nov 28 11:30:32 crc kubenswrapper[4923]: I1128 11:30:32.795748 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5b856c5697-rqrzr"] Nov 28 11:30:32 crc kubenswrapper[4923]: I1128 11:30:32.942865 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7459bffc-943f-4bb7-a293-952b538a7b5e-ovsdbserver-sb\") pod \"dnsmasq-dns-5b856c5697-rqrzr\" (UID: \"7459bffc-943f-4bb7-a293-952b538a7b5e\") " pod="openstack/dnsmasq-dns-5b856c5697-rqrzr" Nov 28 11:30:32 crc kubenswrapper[4923]: I1128 11:30:32.942923 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-87d6p\" (UniqueName: \"kubernetes.io/projected/7459bffc-943f-4bb7-a293-952b538a7b5e-kube-api-access-87d6p\") pod \"dnsmasq-dns-5b856c5697-rqrzr\" (UID: \"7459bffc-943f-4bb7-a293-952b538a7b5e\") " pod="openstack/dnsmasq-dns-5b856c5697-rqrzr" Nov 28 11:30:32 crc kubenswrapper[4923]: I1128 11:30:32.942984 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7459bffc-943f-4bb7-a293-952b538a7b5e-dns-svc\") pod \"dnsmasq-dns-5b856c5697-rqrzr\" (UID: \"7459bffc-943f-4bb7-a293-952b538a7b5e\") " pod="openstack/dnsmasq-dns-5b856c5697-rqrzr" Nov 28 11:30:32 crc kubenswrapper[4923]: I1128 11:30:32.943029 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7459bffc-943f-4bb7-a293-952b538a7b5e-config\") pod \"dnsmasq-dns-5b856c5697-rqrzr\" (UID: \"7459bffc-943f-4bb7-a293-952b538a7b5e\") " pod="openstack/dnsmasq-dns-5b856c5697-rqrzr" Nov 28 11:30:32 crc kubenswrapper[4923]: I1128 11:30:32.943051 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7459bffc-943f-4bb7-a293-952b538a7b5e-ovsdbserver-nb\") pod \"dnsmasq-dns-5b856c5697-rqrzr\" (UID: \"7459bffc-943f-4bb7-a293-952b538a7b5e\") " pod="openstack/dnsmasq-dns-5b856c5697-rqrzr" Nov 28 11:30:33 crc kubenswrapper[4923]: I1128 11:30:33.044773 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7459bffc-943f-4bb7-a293-952b538a7b5e-ovsdbserver-sb\") pod \"dnsmasq-dns-5b856c5697-rqrzr\" (UID: \"7459bffc-943f-4bb7-a293-952b538a7b5e\") " pod="openstack/dnsmasq-dns-5b856c5697-rqrzr" Nov 28 11:30:33 crc kubenswrapper[4923]: I1128 11:30:33.044822 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-87d6p\" (UniqueName: \"kubernetes.io/projected/7459bffc-943f-4bb7-a293-952b538a7b5e-kube-api-access-87d6p\") pod 
\"dnsmasq-dns-5b856c5697-rqrzr\" (UID: \"7459bffc-943f-4bb7-a293-952b538a7b5e\") " pod="openstack/dnsmasq-dns-5b856c5697-rqrzr" Nov 28 11:30:33 crc kubenswrapper[4923]: I1128 11:30:33.044867 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7459bffc-943f-4bb7-a293-952b538a7b5e-dns-svc\") pod \"dnsmasq-dns-5b856c5697-rqrzr\" (UID: \"7459bffc-943f-4bb7-a293-952b538a7b5e\") " pod="openstack/dnsmasq-dns-5b856c5697-rqrzr" Nov 28 11:30:33 crc kubenswrapper[4923]: I1128 11:30:33.044912 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7459bffc-943f-4bb7-a293-952b538a7b5e-config\") pod \"dnsmasq-dns-5b856c5697-rqrzr\" (UID: \"7459bffc-943f-4bb7-a293-952b538a7b5e\") " pod="openstack/dnsmasq-dns-5b856c5697-rqrzr" Nov 28 11:30:33 crc kubenswrapper[4923]: I1128 11:30:33.044945 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7459bffc-943f-4bb7-a293-952b538a7b5e-ovsdbserver-nb\") pod \"dnsmasq-dns-5b856c5697-rqrzr\" (UID: \"7459bffc-943f-4bb7-a293-952b538a7b5e\") " pod="openstack/dnsmasq-dns-5b856c5697-rqrzr" Nov 28 11:30:33 crc kubenswrapper[4923]: I1128 11:30:33.045757 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7459bffc-943f-4bb7-a293-952b538a7b5e-ovsdbserver-nb\") pod \"dnsmasq-dns-5b856c5697-rqrzr\" (UID: \"7459bffc-943f-4bb7-a293-952b538a7b5e\") " pod="openstack/dnsmasq-dns-5b856c5697-rqrzr" Nov 28 11:30:33 crc kubenswrapper[4923]: I1128 11:30:33.046354 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7459bffc-943f-4bb7-a293-952b538a7b5e-ovsdbserver-sb\") pod \"dnsmasq-dns-5b856c5697-rqrzr\" (UID: \"7459bffc-943f-4bb7-a293-952b538a7b5e\") " pod="openstack/dnsmasq-dns-5b856c5697-rqrzr" Nov 28 11:30:33 crc kubenswrapper[4923]: I1128 11:30:33.047184 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7459bffc-943f-4bb7-a293-952b538a7b5e-dns-svc\") pod \"dnsmasq-dns-5b856c5697-rqrzr\" (UID: \"7459bffc-943f-4bb7-a293-952b538a7b5e\") " pod="openstack/dnsmasq-dns-5b856c5697-rqrzr" Nov 28 11:30:33 crc kubenswrapper[4923]: I1128 11:30:33.047699 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7459bffc-943f-4bb7-a293-952b538a7b5e-config\") pod \"dnsmasq-dns-5b856c5697-rqrzr\" (UID: \"7459bffc-943f-4bb7-a293-952b538a7b5e\") " pod="openstack/dnsmasq-dns-5b856c5697-rqrzr" Nov 28 11:30:33 crc kubenswrapper[4923]: I1128 11:30:33.063284 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-87d6p\" (UniqueName: \"kubernetes.io/projected/7459bffc-943f-4bb7-a293-952b538a7b5e-kube-api-access-87d6p\") pod \"dnsmasq-dns-5b856c5697-rqrzr\" (UID: \"7459bffc-943f-4bb7-a293-952b538a7b5e\") " pod="openstack/dnsmasq-dns-5b856c5697-rqrzr" Nov 28 11:30:33 crc kubenswrapper[4923]: I1128 11:30:33.129565 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5b856c5697-rqrzr" Nov 28 11:30:33 crc kubenswrapper[4923]: W1128 11:30:33.675988 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7459bffc_943f_4bb7_a293_952b538a7b5e.slice/crio-e3c91733f7e719e7531206875aa7bc5715a7f6a352c689835e1bfd34ff525aa8 WatchSource:0}: Error finding container e3c91733f7e719e7531206875aa7bc5715a7f6a352c689835e1bfd34ff525aa8: Status 404 returned error can't find the container with id e3c91733f7e719e7531206875aa7bc5715a7f6a352c689835e1bfd34ff525aa8 Nov 28 11:30:33 crc kubenswrapper[4923]: I1128 11:30:33.677269 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5b856c5697-rqrzr"] Nov 28 11:30:34 crc kubenswrapper[4923]: I1128 11:30:34.573564 4923 generic.go:334] "Generic (PLEG): container finished" podID="7459bffc-943f-4bb7-a293-952b538a7b5e" containerID="212014ca59ea0c15cffe96fc3cd477a2721f81262fa09c7762665489316942fa" exitCode=0 Nov 28 11:30:34 crc kubenswrapper[4923]: I1128 11:30:34.574527 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b856c5697-rqrzr" event={"ID":"7459bffc-943f-4bb7-a293-952b538a7b5e","Type":"ContainerDied","Data":"212014ca59ea0c15cffe96fc3cd477a2721f81262fa09c7762665489316942fa"} Nov 28 11:30:34 crc kubenswrapper[4923]: I1128 11:30:34.574648 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b856c5697-rqrzr" event={"ID":"7459bffc-943f-4bb7-a293-952b538a7b5e","Type":"ContainerStarted","Data":"e3c91733f7e719e7531206875aa7bc5715a7f6a352c689835e1bfd34ff525aa8"} Nov 28 11:30:35 crc kubenswrapper[4923]: I1128 11:30:35.155122 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 11:30:35 crc kubenswrapper[4923]: I1128 11:30:35.155439 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f1d9d17f-cc44-4cac-a8d3-b7217e4ace86" containerName="ceilometer-central-agent" containerID="cri-o://f7e7622f5008a240742beba625688787b9273771d86e79898448ce0cda55b79c" gracePeriod=30 Nov 28 11:30:35 crc kubenswrapper[4923]: I1128 11:30:35.155856 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f1d9d17f-cc44-4cac-a8d3-b7217e4ace86" containerName="proxy-httpd" containerID="cri-o://a3447021828fb8202e6be520e5cb4b0b7bfc128ab753f18cc40c949570604056" gracePeriod=30 Nov 28 11:30:35 crc kubenswrapper[4923]: I1128 11:30:35.155923 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f1d9d17f-cc44-4cac-a8d3-b7217e4ace86" containerName="sg-core" containerID="cri-o://88b6575f62304bfed1222f611085fe62823893245ec149eadf2de57718cfef99" gracePeriod=30 Nov 28 11:30:35 crc kubenswrapper[4923]: I1128 11:30:35.156012 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f1d9d17f-cc44-4cac-a8d3-b7217e4ace86" containerName="ceilometer-notification-agent" containerID="cri-o://da172f3b4dc7fdbc034daadc9929d699474ce8e227799073a7a52f2de4bc239b" gracePeriod=30 Nov 28 11:30:35 crc kubenswrapper[4923]: I1128 11:30:35.222404 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Nov 28 11:30:35 crc kubenswrapper[4923]: I1128 11:30:35.405086 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 28 11:30:35 crc 
Nov 28 11:30:35 crc kubenswrapper[4923]: I1128 11:30:35.586374 4923 generic.go:334] "Generic (PLEG): container finished" podID="f1d9d17f-cc44-4cac-a8d3-b7217e4ace86" containerID="88b6575f62304bfed1222f611085fe62823893245ec149eadf2de57718cfef99" exitCode=2
Nov 28 11:30:35 crc kubenswrapper[4923]: I1128 11:30:35.586397 4923 generic.go:334] "Generic (PLEG): container finished" podID="f1d9d17f-cc44-4cac-a8d3-b7217e4ace86" containerID="f7e7622f5008a240742beba625688787b9273771d86e79898448ce0cda55b79c" exitCode=0
Nov 28 11:30:35 crc kubenswrapper[4923]: I1128 11:30:35.585685 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f1d9d17f-cc44-4cac-a8d3-b7217e4ace86","Type":"ContainerDied","Data":"a3447021828fb8202e6be520e5cb4b0b7bfc128ab753f18cc40c949570604056"}
Nov 28 11:30:35 crc kubenswrapper[4923]: I1128 11:30:35.586468 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f1d9d17f-cc44-4cac-a8d3-b7217e4ace86","Type":"ContainerDied","Data":"88b6575f62304bfed1222f611085fe62823893245ec149eadf2de57718cfef99"}
Nov 28 11:30:35 crc kubenswrapper[4923]: I1128 11:30:35.586483 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f1d9d17f-cc44-4cac-a8d3-b7217e4ace86","Type":"ContainerDied","Data":"f7e7622f5008a240742beba625688787b9273771d86e79898448ce0cda55b79c"}
Nov 28 11:30:35 crc kubenswrapper[4923]: I1128 11:30:35.589548 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="c0cabae9-7237-43fb-b966-8f59d437c495" containerName="nova-api-log" containerID="cri-o://0f361de299348857d212d19738ca486b4f5291bdcc90e07d0c5faa113c21ad8b" gracePeriod=30
Nov 28 11:30:35 crc kubenswrapper[4923]: I1128 11:30:35.589735 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="c0cabae9-7237-43fb-b966-8f59d437c495" containerName="nova-api-api" containerID="cri-o://aebe39e0d037dc30e75e687afa6cf7f7f7ee9cbc01f18bf2333b48dd274d7e95" gracePeriod=30
Nov 28 11:30:35 crc kubenswrapper[4923]: I1128 11:30:35.589787 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b856c5697-rqrzr" event={"ID":"7459bffc-943f-4bb7-a293-952b538a7b5e","Type":"ContainerStarted","Data":"91a1f6464728cbaca6a6d511a696a96f4bbf009d6e3e860f30ef9e55cba6f58e"}
Nov 28 11:30:35 crc kubenswrapper[4923]: I1128 11:30:35.589982 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5b856c5697-rqrzr"
Nov 28 11:30:35 crc kubenswrapper[4923]: I1128 11:30:35.609108 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5b856c5697-rqrzr" podStartSLOduration=3.609092547 podStartE2EDuration="3.609092547s" podCreationTimestamp="2025-11-28 11:30:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:30:35.605415444 +0000 UTC m=+1314.734099654" watchObservedRunningTime="2025-11-28 11:30:35.609092547 +0000 UTC m=+1314.737776757"
Nov 28 11:30:36 crc kubenswrapper[4923]: I1128 11:30:36.598783 4923 generic.go:334] "Generic (PLEG): container finished" podID="c0cabae9-7237-43fb-b966-8f59d437c495" containerID="0f361de299348857d212d19738ca486b4f5291bdcc90e07d0c5faa113c21ad8b" exitCode=143
Nov 28 11:30:36 crc kubenswrapper[4923]: I1128 11:30:36.598862 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"c0cabae9-7237-43fb-b966-8f59d437c495","Type":"ContainerDied","Data":"0f361de299348857d212d19738ca486b4f5291bdcc90e07d0c5faa113c21ad8b"}
Nov 28 11:30:39 crc kubenswrapper[4923]: I1128 11:30:39.300647 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 28 11:30:39 crc kubenswrapper[4923]: I1128 11:30:39.471351 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c0cabae9-7237-43fb-b966-8f59d437c495-logs\") pod \"c0cabae9-7237-43fb-b966-8f59d437c495\" (UID: \"c0cabae9-7237-43fb-b966-8f59d437c495\") "
Nov 28 11:30:39 crc kubenswrapper[4923]: I1128 11:30:39.471435 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c0cabae9-7237-43fb-b966-8f59d437c495-config-data\") pod \"c0cabae9-7237-43fb-b966-8f59d437c495\" (UID: \"c0cabae9-7237-43fb-b966-8f59d437c495\") "
Nov 28 11:30:39 crc kubenswrapper[4923]: I1128 11:30:39.471528 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rvlx4\" (UniqueName: \"kubernetes.io/projected/c0cabae9-7237-43fb-b966-8f59d437c495-kube-api-access-rvlx4\") pod \"c0cabae9-7237-43fb-b966-8f59d437c495\" (UID: \"c0cabae9-7237-43fb-b966-8f59d437c495\") "
Nov 28 11:30:39 crc kubenswrapper[4923]: I1128 11:30:39.471624 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c0cabae9-7237-43fb-b966-8f59d437c495-combined-ca-bundle\") pod \"c0cabae9-7237-43fb-b966-8f59d437c495\" (UID: \"c0cabae9-7237-43fb-b966-8f59d437c495\") "
Nov 28 11:30:39 crc kubenswrapper[4923]: I1128 11:30:39.471762 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c0cabae9-7237-43fb-b966-8f59d437c495-logs" (OuterVolumeSpecName: "logs") pod "c0cabae9-7237-43fb-b966-8f59d437c495" (UID: "c0cabae9-7237-43fb-b966-8f59d437c495"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 11:30:39 crc kubenswrapper[4923]: I1128 11:30:39.472057 4923 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c0cabae9-7237-43fb-b966-8f59d437c495-logs\") on node \"crc\" DevicePath \"\""
Nov 28 11:30:39 crc kubenswrapper[4923]: I1128 11:30:39.493170 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c0cabae9-7237-43fb-b966-8f59d437c495-kube-api-access-rvlx4" (OuterVolumeSpecName: "kube-api-access-rvlx4") pod "c0cabae9-7237-43fb-b966-8f59d437c495" (UID: "c0cabae9-7237-43fb-b966-8f59d437c495"). InnerVolumeSpecName "kube-api-access-rvlx4". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 11:30:39 crc kubenswrapper[4923]: I1128 11:30:39.514216 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c0cabae9-7237-43fb-b966-8f59d437c495-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c0cabae9-7237-43fb-b966-8f59d437c495" (UID: "c0cabae9-7237-43fb-b966-8f59d437c495"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:30:39 crc kubenswrapper[4923]: I1128 11:30:39.573736 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rvlx4\" (UniqueName: \"kubernetes.io/projected/c0cabae9-7237-43fb-b966-8f59d437c495-kube-api-access-rvlx4\") on node \"crc\" DevicePath \"\"" Nov 28 11:30:39 crc kubenswrapper[4923]: I1128 11:30:39.573766 4923 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c0cabae9-7237-43fb-b966-8f59d437c495-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 11:30:39 crc kubenswrapper[4923]: I1128 11:30:39.581853 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c0cabae9-7237-43fb-b966-8f59d437c495-config-data" (OuterVolumeSpecName: "config-data") pod "c0cabae9-7237-43fb-b966-8f59d437c495" (UID: "c0cabae9-7237-43fb-b966-8f59d437c495"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:30:39 crc kubenswrapper[4923]: I1128 11:30:39.626906 4923 generic.go:334] "Generic (PLEG): container finished" podID="c0cabae9-7237-43fb-b966-8f59d437c495" containerID="aebe39e0d037dc30e75e687afa6cf7f7f7ee9cbc01f18bf2333b48dd274d7e95" exitCode=0 Nov 28 11:30:39 crc kubenswrapper[4923]: I1128 11:30:39.626958 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"c0cabae9-7237-43fb-b966-8f59d437c495","Type":"ContainerDied","Data":"aebe39e0d037dc30e75e687afa6cf7f7f7ee9cbc01f18bf2333b48dd274d7e95"} Nov 28 11:30:39 crc kubenswrapper[4923]: I1128 11:30:39.626983 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"c0cabae9-7237-43fb-b966-8f59d437c495","Type":"ContainerDied","Data":"e6944f7fb6b71b1c16ea7a05da873ec8c66b5607c4e92680069fd891b090ede1"} Nov 28 11:30:39 crc kubenswrapper[4923]: I1128 11:30:39.626998 4923 scope.go:117] "RemoveContainer" containerID="aebe39e0d037dc30e75e687afa6cf7f7f7ee9cbc01f18bf2333b48dd274d7e95" Nov 28 11:30:39 crc kubenswrapper[4923]: I1128 11:30:39.627106 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 28 11:30:39 crc kubenswrapper[4923]: I1128 11:30:39.675806 4923 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c0cabae9-7237-43fb-b966-8f59d437c495-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 11:30:39 crc kubenswrapper[4923]: I1128 11:30:39.690198 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 28 11:30:39 crc kubenswrapper[4923]: I1128 11:30:39.705024 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 28 11:30:39 crc kubenswrapper[4923]: I1128 11:30:39.711802 4923 scope.go:117] "RemoveContainer" containerID="0f361de299348857d212d19738ca486b4f5291bdcc90e07d0c5faa113c21ad8b" Nov 28 11:30:39 crc kubenswrapper[4923]: I1128 11:30:39.725614 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 28 11:30:39 crc kubenswrapper[4923]: E1128 11:30:39.726002 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c0cabae9-7237-43fb-b966-8f59d437c495" containerName="nova-api-api" Nov 28 11:30:39 crc kubenswrapper[4923]: I1128 11:30:39.726014 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="c0cabae9-7237-43fb-b966-8f59d437c495" containerName="nova-api-api" Nov 28 11:30:39 crc kubenswrapper[4923]: E1128 11:30:39.726034 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c0cabae9-7237-43fb-b966-8f59d437c495" containerName="nova-api-log" Nov 28 11:30:39 crc kubenswrapper[4923]: I1128 11:30:39.726040 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="c0cabae9-7237-43fb-b966-8f59d437c495" containerName="nova-api-log" Nov 28 11:30:39 crc kubenswrapper[4923]: I1128 11:30:39.726223 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="c0cabae9-7237-43fb-b966-8f59d437c495" containerName="nova-api-api" Nov 28 11:30:39 crc kubenswrapper[4923]: I1128 11:30:39.726239 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="c0cabae9-7237-43fb-b966-8f59d437c495" containerName="nova-api-log" Nov 28 11:30:39 crc kubenswrapper[4923]: I1128 11:30:39.727124 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 28 11:30:39 crc kubenswrapper[4923]: I1128 11:30:39.730912 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Nov 28 11:30:39 crc kubenswrapper[4923]: I1128 11:30:39.732395 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Nov 28 11:30:39 crc kubenswrapper[4923]: I1128 11:30:39.732576 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 28 11:30:39 crc kubenswrapper[4923]: I1128 11:30:39.748379 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 11:30:39 crc kubenswrapper[4923]: I1128 11:30:39.764101 4923 scope.go:117] "RemoveContainer" containerID="aebe39e0d037dc30e75e687afa6cf7f7f7ee9cbc01f18bf2333b48dd274d7e95" Nov 28 11:30:39 crc kubenswrapper[4923]: E1128 11:30:39.766139 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"aebe39e0d037dc30e75e687afa6cf7f7f7ee9cbc01f18bf2333b48dd274d7e95\": container with ID starting with aebe39e0d037dc30e75e687afa6cf7f7f7ee9cbc01f18bf2333b48dd274d7e95 not found: ID does not exist" containerID="aebe39e0d037dc30e75e687afa6cf7f7f7ee9cbc01f18bf2333b48dd274d7e95" Nov 28 11:30:39 crc kubenswrapper[4923]: I1128 11:30:39.766168 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"aebe39e0d037dc30e75e687afa6cf7f7f7ee9cbc01f18bf2333b48dd274d7e95"} err="failed to get container status \"aebe39e0d037dc30e75e687afa6cf7f7f7ee9cbc01f18bf2333b48dd274d7e95\": rpc error: code = NotFound desc = could not find container \"aebe39e0d037dc30e75e687afa6cf7f7f7ee9cbc01f18bf2333b48dd274d7e95\": container with ID starting with aebe39e0d037dc30e75e687afa6cf7f7f7ee9cbc01f18bf2333b48dd274d7e95 not found: ID does not exist" Nov 28 11:30:39 crc kubenswrapper[4923]: I1128 11:30:39.766189 4923 scope.go:117] "RemoveContainer" containerID="0f361de299348857d212d19738ca486b4f5291bdcc90e07d0c5faa113c21ad8b" Nov 28 11:30:39 crc kubenswrapper[4923]: E1128 11:30:39.766413 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0f361de299348857d212d19738ca486b4f5291bdcc90e07d0c5faa113c21ad8b\": container with ID starting with 0f361de299348857d212d19738ca486b4f5291bdcc90e07d0c5faa113c21ad8b not found: ID does not exist" containerID="0f361de299348857d212d19738ca486b4f5291bdcc90e07d0c5faa113c21ad8b" Nov 28 11:30:39 crc kubenswrapper[4923]: I1128 11:30:39.766507 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0f361de299348857d212d19738ca486b4f5291bdcc90e07d0c5faa113c21ad8b"} err="failed to get container status \"0f361de299348857d212d19738ca486b4f5291bdcc90e07d0c5faa113c21ad8b\": rpc error: code = NotFound desc = could not find container \"0f361de299348857d212d19738ca486b4f5291bdcc90e07d0c5faa113c21ad8b\": container with ID starting with 0f361de299348857d212d19738ca486b4f5291bdcc90e07d0c5faa113c21ad8b not found: ID does not exist" Nov 28 11:30:39 crc kubenswrapper[4923]: I1128 11:30:39.879180 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d836f6d9-f12f-4fd2-b84d-3ace0ffa8463-internal-tls-certs\") pod \"nova-api-0\" (UID: \"d836f6d9-f12f-4fd2-b84d-3ace0ffa8463\") " pod="openstack/nova-api-0" Nov 28 
11:30:39 crc kubenswrapper[4923]: I1128 11:30:39.879233 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d836f6d9-f12f-4fd2-b84d-3ace0ffa8463-config-data\") pod \"nova-api-0\" (UID: \"d836f6d9-f12f-4fd2-b84d-3ace0ffa8463\") " pod="openstack/nova-api-0" Nov 28 11:30:39 crc kubenswrapper[4923]: I1128 11:30:39.879313 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-66j98\" (UniqueName: \"kubernetes.io/projected/d836f6d9-f12f-4fd2-b84d-3ace0ffa8463-kube-api-access-66j98\") pod \"nova-api-0\" (UID: \"d836f6d9-f12f-4fd2-b84d-3ace0ffa8463\") " pod="openstack/nova-api-0" Nov 28 11:30:39 crc kubenswrapper[4923]: I1128 11:30:39.879349 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d836f6d9-f12f-4fd2-b84d-3ace0ffa8463-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"d836f6d9-f12f-4fd2-b84d-3ace0ffa8463\") " pod="openstack/nova-api-0" Nov 28 11:30:39 crc kubenswrapper[4923]: I1128 11:30:39.879392 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d836f6d9-f12f-4fd2-b84d-3ace0ffa8463-public-tls-certs\") pod \"nova-api-0\" (UID: \"d836f6d9-f12f-4fd2-b84d-3ace0ffa8463\") " pod="openstack/nova-api-0" Nov 28 11:30:39 crc kubenswrapper[4923]: I1128 11:30:39.879410 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d836f6d9-f12f-4fd2-b84d-3ace0ffa8463-logs\") pod \"nova-api-0\" (UID: \"d836f6d9-f12f-4fd2-b84d-3ace0ffa8463\") " pod="openstack/nova-api-0" Nov 28 11:30:39 crc kubenswrapper[4923]: I1128 11:30:39.981029 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d836f6d9-f12f-4fd2-b84d-3ace0ffa8463-internal-tls-certs\") pod \"nova-api-0\" (UID: \"d836f6d9-f12f-4fd2-b84d-3ace0ffa8463\") " pod="openstack/nova-api-0" Nov 28 11:30:39 crc kubenswrapper[4923]: I1128 11:30:39.981078 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d836f6d9-f12f-4fd2-b84d-3ace0ffa8463-config-data\") pod \"nova-api-0\" (UID: \"d836f6d9-f12f-4fd2-b84d-3ace0ffa8463\") " pod="openstack/nova-api-0" Nov 28 11:30:39 crc kubenswrapper[4923]: I1128 11:30:39.981140 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-66j98\" (UniqueName: \"kubernetes.io/projected/d836f6d9-f12f-4fd2-b84d-3ace0ffa8463-kube-api-access-66j98\") pod \"nova-api-0\" (UID: \"d836f6d9-f12f-4fd2-b84d-3ace0ffa8463\") " pod="openstack/nova-api-0" Nov 28 11:30:39 crc kubenswrapper[4923]: I1128 11:30:39.981181 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d836f6d9-f12f-4fd2-b84d-3ace0ffa8463-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"d836f6d9-f12f-4fd2-b84d-3ace0ffa8463\") " pod="openstack/nova-api-0" Nov 28 11:30:39 crc kubenswrapper[4923]: I1128 11:30:39.981225 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d836f6d9-f12f-4fd2-b84d-3ace0ffa8463-public-tls-certs\") pod \"nova-api-0\" (UID: 
\"d836f6d9-f12f-4fd2-b84d-3ace0ffa8463\") " pod="openstack/nova-api-0" Nov 28 11:30:39 crc kubenswrapper[4923]: I1128 11:30:39.981245 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d836f6d9-f12f-4fd2-b84d-3ace0ffa8463-logs\") pod \"nova-api-0\" (UID: \"d836f6d9-f12f-4fd2-b84d-3ace0ffa8463\") " pod="openstack/nova-api-0" Nov 28 11:30:39 crc kubenswrapper[4923]: I1128 11:30:39.981627 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d836f6d9-f12f-4fd2-b84d-3ace0ffa8463-logs\") pod \"nova-api-0\" (UID: \"d836f6d9-f12f-4fd2-b84d-3ace0ffa8463\") " pod="openstack/nova-api-0" Nov 28 11:30:39 crc kubenswrapper[4923]: I1128 11:30:39.987023 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d836f6d9-f12f-4fd2-b84d-3ace0ffa8463-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"d836f6d9-f12f-4fd2-b84d-3ace0ffa8463\") " pod="openstack/nova-api-0" Nov 28 11:30:39 crc kubenswrapper[4923]: I1128 11:30:39.990251 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d836f6d9-f12f-4fd2-b84d-3ace0ffa8463-public-tls-certs\") pod \"nova-api-0\" (UID: \"d836f6d9-f12f-4fd2-b84d-3ace0ffa8463\") " pod="openstack/nova-api-0" Nov 28 11:30:39 crc kubenswrapper[4923]: I1128 11:30:39.992791 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d836f6d9-f12f-4fd2-b84d-3ace0ffa8463-config-data\") pod \"nova-api-0\" (UID: \"d836f6d9-f12f-4fd2-b84d-3ace0ffa8463\") " pod="openstack/nova-api-0" Nov 28 11:30:39 crc kubenswrapper[4923]: I1128 11:30:39.998085 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d836f6d9-f12f-4fd2-b84d-3ace0ffa8463-internal-tls-certs\") pod \"nova-api-0\" (UID: \"d836f6d9-f12f-4fd2-b84d-3ace0ffa8463\") " pod="openstack/nova-api-0" Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.003545 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-66j98\" (UniqueName: \"kubernetes.io/projected/d836f6d9-f12f-4fd2-b84d-3ace0ffa8463-kube-api-access-66j98\") pod \"nova-api-0\" (UID: \"d836f6d9-f12f-4fd2-b84d-3ace0ffa8463\") " pod="openstack/nova-api-0" Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.044670 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.153978 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.222611 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0" Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.251211 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0" Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.286151 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f1d9d17f-cc44-4cac-a8d3-b7217e4ace86-scripts\") pod \"f1d9d17f-cc44-4cac-a8d3-b7217e4ace86\" (UID: \"f1d9d17f-cc44-4cac-a8d3-b7217e4ace86\") " Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.286277 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f1d9d17f-cc44-4cac-a8d3-b7217e4ace86-log-httpd\") pod \"f1d9d17f-cc44-4cac-a8d3-b7217e4ace86\" (UID: \"f1d9d17f-cc44-4cac-a8d3-b7217e4ace86\") " Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.286316 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f1d9d17f-cc44-4cac-a8d3-b7217e4ace86-config-data\") pod \"f1d9d17f-cc44-4cac-a8d3-b7217e4ace86\" (UID: \"f1d9d17f-cc44-4cac-a8d3-b7217e4ace86\") " Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.286430 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/f1d9d17f-cc44-4cac-a8d3-b7217e4ace86-ceilometer-tls-certs\") pod \"f1d9d17f-cc44-4cac-a8d3-b7217e4ace86\" (UID: \"f1d9d17f-cc44-4cac-a8d3-b7217e4ace86\") " Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.286465 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f1d9d17f-cc44-4cac-a8d3-b7217e4ace86-combined-ca-bundle\") pod \"f1d9d17f-cc44-4cac-a8d3-b7217e4ace86\" (UID: \"f1d9d17f-cc44-4cac-a8d3-b7217e4ace86\") " Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.286496 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f1d9d17f-cc44-4cac-a8d3-b7217e4ace86-sg-core-conf-yaml\") pod \"f1d9d17f-cc44-4cac-a8d3-b7217e4ace86\" (UID: \"f1d9d17f-cc44-4cac-a8d3-b7217e4ace86\") " Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.286544 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f1d9d17f-cc44-4cac-a8d3-b7217e4ace86-run-httpd\") pod \"f1d9d17f-cc44-4cac-a8d3-b7217e4ace86\" (UID: \"f1d9d17f-cc44-4cac-a8d3-b7217e4ace86\") " Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.286586 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pz8xh\" (UniqueName: \"kubernetes.io/projected/f1d9d17f-cc44-4cac-a8d3-b7217e4ace86-kube-api-access-pz8xh\") pod \"f1d9d17f-cc44-4cac-a8d3-b7217e4ace86\" (UID: \"f1d9d17f-cc44-4cac-a8d3-b7217e4ace86\") " Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.288407 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f1d9d17f-cc44-4cac-a8d3-b7217e4ace86-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "f1d9d17f-cc44-4cac-a8d3-b7217e4ace86" (UID: 
"f1d9d17f-cc44-4cac-a8d3-b7217e4ace86"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.291385 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f1d9d17f-cc44-4cac-a8d3-b7217e4ace86-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "f1d9d17f-cc44-4cac-a8d3-b7217e4ace86" (UID: "f1d9d17f-cc44-4cac-a8d3-b7217e4ace86"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.304324 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f1d9d17f-cc44-4cac-a8d3-b7217e4ace86-scripts" (OuterVolumeSpecName: "scripts") pod "f1d9d17f-cc44-4cac-a8d3-b7217e4ace86" (UID: "f1d9d17f-cc44-4cac-a8d3-b7217e4ace86"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.318184 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f1d9d17f-cc44-4cac-a8d3-b7217e4ace86-kube-api-access-pz8xh" (OuterVolumeSpecName: "kube-api-access-pz8xh") pod "f1d9d17f-cc44-4cac-a8d3-b7217e4ace86" (UID: "f1d9d17f-cc44-4cac-a8d3-b7217e4ace86"). InnerVolumeSpecName "kube-api-access-pz8xh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.351216 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.389263 4923 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f1d9d17f-cc44-4cac-a8d3-b7217e4ace86-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.389296 4923 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f1d9d17f-cc44-4cac-a8d3-b7217e4ace86-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.389309 4923 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f1d9d17f-cc44-4cac-a8d3-b7217e4ace86-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.389318 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pz8xh\" (UniqueName: \"kubernetes.io/projected/f1d9d17f-cc44-4cac-a8d3-b7217e4ace86-kube-api-access-pz8xh\") on node \"crc\" DevicePath \"\"" Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.394342 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f1d9d17f-cc44-4cac-a8d3-b7217e4ace86-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "f1d9d17f-cc44-4cac-a8d3-b7217e4ace86" (UID: "f1d9d17f-cc44-4cac-a8d3-b7217e4ace86"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.395217 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f1d9d17f-cc44-4cac-a8d3-b7217e4ace86-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "f1d9d17f-cc44-4cac-a8d3-b7217e4ace86" (UID: "f1d9d17f-cc44-4cac-a8d3-b7217e4ace86"). InnerVolumeSpecName "ceilometer-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.450202 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f1d9d17f-cc44-4cac-a8d3-b7217e4ace86-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f1d9d17f-cc44-4cac-a8d3-b7217e4ace86" (UID: "f1d9d17f-cc44-4cac-a8d3-b7217e4ace86"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.466080 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f1d9d17f-cc44-4cac-a8d3-b7217e4ace86-config-data" (OuterVolumeSpecName: "config-data") pod "f1d9d17f-cc44-4cac-a8d3-b7217e4ace86" (UID: "f1d9d17f-cc44-4cac-a8d3-b7217e4ace86"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.490804 4923 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/f1d9d17f-cc44-4cac-a8d3-b7217e4ace86-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.490833 4923 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f1d9d17f-cc44-4cac-a8d3-b7217e4ace86-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.490842 4923 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f1d9d17f-cc44-4cac-a8d3-b7217e4ace86-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.490864 4923 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f1d9d17f-cc44-4cac-a8d3-b7217e4ace86-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.639318 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"d836f6d9-f12f-4fd2-b84d-3ace0ffa8463","Type":"ContainerStarted","Data":"33a42edcf28b8e3567051f03ea9a0c4b0ab5953978066fab25862928e8a5921e"} Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.639688 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"d836f6d9-f12f-4fd2-b84d-3ace0ffa8463","Type":"ContainerStarted","Data":"ed7d390949cac379049aa814e45535885879f26c28b6660e89564aa775fbf798"} Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.642113 4923 generic.go:334] "Generic (PLEG): container finished" podID="f1d9d17f-cc44-4cac-a8d3-b7217e4ace86" containerID="da172f3b4dc7fdbc034daadc9929d699474ce8e227799073a7a52f2de4bc239b" exitCode=0 Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.642190 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.642201 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f1d9d17f-cc44-4cac-a8d3-b7217e4ace86","Type":"ContainerDied","Data":"da172f3b4dc7fdbc034daadc9929d699474ce8e227799073a7a52f2de4bc239b"} Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.642235 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f1d9d17f-cc44-4cac-a8d3-b7217e4ace86","Type":"ContainerDied","Data":"022ed6859aeda20e9133f6bb50137426d3f13e0f4b44d1f331be632fa037b5a0"} Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.642268 4923 scope.go:117] "RemoveContainer" containerID="a3447021828fb8202e6be520e5cb4b0b7bfc128ab753f18cc40c949570604056" Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.672397 4923 scope.go:117] "RemoveContainer" containerID="88b6575f62304bfed1222f611085fe62823893245ec149eadf2de57718cfef99" Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.676780 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0" Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.681910 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.690765 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.704035 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 28 11:30:40 crc kubenswrapper[4923]: E1128 11:30:40.704600 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f1d9d17f-cc44-4cac-a8d3-b7217e4ace86" containerName="proxy-httpd" Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.704668 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="f1d9d17f-cc44-4cac-a8d3-b7217e4ace86" containerName="proxy-httpd" Nov 28 11:30:40 crc kubenswrapper[4923]: E1128 11:30:40.704732 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f1d9d17f-cc44-4cac-a8d3-b7217e4ace86" containerName="sg-core" Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.704788 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="f1d9d17f-cc44-4cac-a8d3-b7217e4ace86" containerName="sg-core" Nov 28 11:30:40 crc kubenswrapper[4923]: E1128 11:30:40.704843 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f1d9d17f-cc44-4cac-a8d3-b7217e4ace86" containerName="ceilometer-central-agent" Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.704892 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="f1d9d17f-cc44-4cac-a8d3-b7217e4ace86" containerName="ceilometer-central-agent" Nov 28 11:30:40 crc kubenswrapper[4923]: E1128 11:30:40.704974 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f1d9d17f-cc44-4cac-a8d3-b7217e4ace86" containerName="ceilometer-notification-agent" Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.705026 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="f1d9d17f-cc44-4cac-a8d3-b7217e4ace86" containerName="ceilometer-notification-agent" Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.705252 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="f1d9d17f-cc44-4cac-a8d3-b7217e4ace86" containerName="sg-core" Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.705322 4923 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="f1d9d17f-cc44-4cac-a8d3-b7217e4ace86" containerName="ceilometer-notification-agent" Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.705403 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="f1d9d17f-cc44-4cac-a8d3-b7217e4ace86" containerName="ceilometer-central-agent" Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.705463 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="f1d9d17f-cc44-4cac-a8d3-b7217e4ace86" containerName="proxy-httpd" Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.706964 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.710162 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.710420 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.723738 4923 scope.go:117] "RemoveContainer" containerID="da172f3b4dc7fdbc034daadc9929d699474ce8e227799073a7a52f2de4bc239b" Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.728481 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.737138 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.797953 4923 scope.go:117] "RemoveContainer" containerID="f7e7622f5008a240742beba625688787b9273771d86e79898448ce0cda55b79c" Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.806838 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/70301603-f005-4e2f-90c9-6daedf3d09a9-config-data\") pod \"ceilometer-0\" (UID: \"70301603-f005-4e2f-90c9-6daedf3d09a9\") " pod="openstack/ceilometer-0" Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.806996 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/70301603-f005-4e2f-90c9-6daedf3d09a9-scripts\") pod \"ceilometer-0\" (UID: \"70301603-f005-4e2f-90c9-6daedf3d09a9\") " pod="openstack/ceilometer-0" Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.807084 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/70301603-f005-4e2f-90c9-6daedf3d09a9-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"70301603-f005-4e2f-90c9-6daedf3d09a9\") " pod="openstack/ceilometer-0" Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.807151 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/70301603-f005-4e2f-90c9-6daedf3d09a9-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"70301603-f005-4e2f-90c9-6daedf3d09a9\") " pod="openstack/ceilometer-0" Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.807244 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/70301603-f005-4e2f-90c9-6daedf3d09a9-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"70301603-f005-4e2f-90c9-6daedf3d09a9\") " pod="openstack/ceilometer-0" Nov 
28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.807322 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/70301603-f005-4e2f-90c9-6daedf3d09a9-run-httpd\") pod \"ceilometer-0\" (UID: \"70301603-f005-4e2f-90c9-6daedf3d09a9\") " pod="openstack/ceilometer-0" Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.807438 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/70301603-f005-4e2f-90c9-6daedf3d09a9-log-httpd\") pod \"ceilometer-0\" (UID: \"70301603-f005-4e2f-90c9-6daedf3d09a9\") " pod="openstack/ceilometer-0" Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.807529 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fstp6\" (UniqueName: \"kubernetes.io/projected/70301603-f005-4e2f-90c9-6daedf3d09a9-kube-api-access-fstp6\") pod \"ceilometer-0\" (UID: \"70301603-f005-4e2f-90c9-6daedf3d09a9\") " pod="openstack/ceilometer-0" Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.823016 4923 scope.go:117] "RemoveContainer" containerID="a3447021828fb8202e6be520e5cb4b0b7bfc128ab753f18cc40c949570604056" Nov 28 11:30:40 crc kubenswrapper[4923]: E1128 11:30:40.825422 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a3447021828fb8202e6be520e5cb4b0b7bfc128ab753f18cc40c949570604056\": container with ID starting with a3447021828fb8202e6be520e5cb4b0b7bfc128ab753f18cc40c949570604056 not found: ID does not exist" containerID="a3447021828fb8202e6be520e5cb4b0b7bfc128ab753f18cc40c949570604056" Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.825468 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a3447021828fb8202e6be520e5cb4b0b7bfc128ab753f18cc40c949570604056"} err="failed to get container status \"a3447021828fb8202e6be520e5cb4b0b7bfc128ab753f18cc40c949570604056\": rpc error: code = NotFound desc = could not find container \"a3447021828fb8202e6be520e5cb4b0b7bfc128ab753f18cc40c949570604056\": container with ID starting with a3447021828fb8202e6be520e5cb4b0b7bfc128ab753f18cc40c949570604056 not found: ID does not exist" Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.825495 4923 scope.go:117] "RemoveContainer" containerID="88b6575f62304bfed1222f611085fe62823893245ec149eadf2de57718cfef99" Nov 28 11:30:40 crc kubenswrapper[4923]: E1128 11:30:40.825865 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"88b6575f62304bfed1222f611085fe62823893245ec149eadf2de57718cfef99\": container with ID starting with 88b6575f62304bfed1222f611085fe62823893245ec149eadf2de57718cfef99 not found: ID does not exist" containerID="88b6575f62304bfed1222f611085fe62823893245ec149eadf2de57718cfef99" Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.825915 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"88b6575f62304bfed1222f611085fe62823893245ec149eadf2de57718cfef99"} err="failed to get container status \"88b6575f62304bfed1222f611085fe62823893245ec149eadf2de57718cfef99\": rpc error: code = NotFound desc = could not find container \"88b6575f62304bfed1222f611085fe62823893245ec149eadf2de57718cfef99\": container with ID starting with 
88b6575f62304bfed1222f611085fe62823893245ec149eadf2de57718cfef99 not found: ID does not exist" Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.825977 4923 scope.go:117] "RemoveContainer" containerID="da172f3b4dc7fdbc034daadc9929d699474ce8e227799073a7a52f2de4bc239b" Nov 28 11:30:40 crc kubenswrapper[4923]: E1128 11:30:40.828271 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"da172f3b4dc7fdbc034daadc9929d699474ce8e227799073a7a52f2de4bc239b\": container with ID starting with da172f3b4dc7fdbc034daadc9929d699474ce8e227799073a7a52f2de4bc239b not found: ID does not exist" containerID="da172f3b4dc7fdbc034daadc9929d699474ce8e227799073a7a52f2de4bc239b" Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.828306 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"da172f3b4dc7fdbc034daadc9929d699474ce8e227799073a7a52f2de4bc239b"} err="failed to get container status \"da172f3b4dc7fdbc034daadc9929d699474ce8e227799073a7a52f2de4bc239b\": rpc error: code = NotFound desc = could not find container \"da172f3b4dc7fdbc034daadc9929d699474ce8e227799073a7a52f2de4bc239b\": container with ID starting with da172f3b4dc7fdbc034daadc9929d699474ce8e227799073a7a52f2de4bc239b not found: ID does not exist" Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.828328 4923 scope.go:117] "RemoveContainer" containerID="f7e7622f5008a240742beba625688787b9273771d86e79898448ce0cda55b79c" Nov 28 11:30:40 crc kubenswrapper[4923]: E1128 11:30:40.832233 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f7e7622f5008a240742beba625688787b9273771d86e79898448ce0cda55b79c\": container with ID starting with f7e7622f5008a240742beba625688787b9273771d86e79898448ce0cda55b79c not found: ID does not exist" containerID="f7e7622f5008a240742beba625688787b9273771d86e79898448ce0cda55b79c" Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.832275 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f7e7622f5008a240742beba625688787b9273771d86e79898448ce0cda55b79c"} err="failed to get container status \"f7e7622f5008a240742beba625688787b9273771d86e79898448ce0cda55b79c\": rpc error: code = NotFound desc = could not find container \"f7e7622f5008a240742beba625688787b9273771d86e79898448ce0cda55b79c\": container with ID starting with f7e7622f5008a240742beba625688787b9273771d86e79898448ce0cda55b79c not found: ID does not exist" Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.882741 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cell-mapping-z2zwx"] Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.886663 4923 util.go:30] "No sandbox for pod can be found. 
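[The RemoveContainer/NotFound exchanges above are the kubelet's idempotent cleanup: it asks the runtime for the container's status, receives a gRPC NotFound, and logs the error without retrying, because a missing container is exactly the end state deletion wants. A minimal Go sketch of that pattern, assuming a hypothetical removeFromRuntime helper in place of the real CRI client:]

    package main

    import (
    	"fmt"

    	"google.golang.org/grpc/codes"
    	"google.golang.org/grpc/status"
    )

    // removeFromRuntime stands in for the real CRI RemoveContainer RPC; for this
    // sketch it always reports the container as already gone, like the log above.
    func removeFromRuntime(id string) error {
    	return status.Error(codes.NotFound, "could not find container \""+id+"\": ID does not exist")
    }

    // removeContainer treats NotFound as success: the error is logged and dropped,
    // so repeated cleanup passes over the same ID stay harmless.
    func removeContainer(id string) error {
    	err := removeFromRuntime(id)
    	if status.Code(err) == codes.NotFound {
    		fmt.Printf("DeleteContainer returned error (ignored): %v\n", err)
    		return nil
    	}
    	return err
    }

    func main() {
    	if err := removeContainer("da172f3b4dc7"); err != nil {
    		fmt.Println("cleanup failed:", err)
    	}
    }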
Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.889652 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data"
Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.894405 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts"
Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.909526 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/70301603-f005-4e2f-90c9-6daedf3d09a9-scripts\") pod \"ceilometer-0\" (UID: \"70301603-f005-4e2f-90c9-6daedf3d09a9\") " pod="openstack/ceilometer-0"
Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.909593 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/70301603-f005-4e2f-90c9-6daedf3d09a9-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"70301603-f005-4e2f-90c9-6daedf3d09a9\") " pod="openstack/ceilometer-0"
Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.909622 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/70301603-f005-4e2f-90c9-6daedf3d09a9-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"70301603-f005-4e2f-90c9-6daedf3d09a9\") " pod="openstack/ceilometer-0"
Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.909651 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/70301603-f005-4e2f-90c9-6daedf3d09a9-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"70301603-f005-4e2f-90c9-6daedf3d09a9\") " pod="openstack/ceilometer-0"
Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.909673 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/70301603-f005-4e2f-90c9-6daedf3d09a9-run-httpd\") pod \"ceilometer-0\" (UID: \"70301603-f005-4e2f-90c9-6daedf3d09a9\") " pod="openstack/ceilometer-0"
Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.909775 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/70301603-f005-4e2f-90c9-6daedf3d09a9-log-httpd\") pod \"ceilometer-0\" (UID: \"70301603-f005-4e2f-90c9-6daedf3d09a9\") " pod="openstack/ceilometer-0"
Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.909814 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fstp6\" (UniqueName: \"kubernetes.io/projected/70301603-f005-4e2f-90c9-6daedf3d09a9-kube-api-access-fstp6\") pod \"ceilometer-0\" (UID: \"70301603-f005-4e2f-90c9-6daedf3d09a9\") " pod="openstack/ceilometer-0"
Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.909873 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/70301603-f005-4e2f-90c9-6daedf3d09a9-config-data\") pod \"ceilometer-0\" (UID: \"70301603-f005-4e2f-90c9-6daedf3d09a9\") " pod="openstack/ceilometer-0"
Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.914632 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/70301603-f005-4e2f-90c9-6daedf3d09a9-run-httpd\") pod \"ceilometer-0\" (UID: \"70301603-f005-4e2f-90c9-6daedf3d09a9\") " pod="openstack/ceilometer-0"
Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.915712 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/70301603-f005-4e2f-90c9-6daedf3d09a9-log-httpd\") pod \"ceilometer-0\" (UID: \"70301603-f005-4e2f-90c9-6daedf3d09a9\") " pod="openstack/ceilometer-0"
Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.932911 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/70301603-f005-4e2f-90c9-6daedf3d09a9-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"70301603-f005-4e2f-90c9-6daedf3d09a9\") " pod="openstack/ceilometer-0"
Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.934125 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/70301603-f005-4e2f-90c9-6daedf3d09a9-config-data\") pod \"ceilometer-0\" (UID: \"70301603-f005-4e2f-90c9-6daedf3d09a9\") " pod="openstack/ceilometer-0"
Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.935592 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/70301603-f005-4e2f-90c9-6daedf3d09a9-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"70301603-f005-4e2f-90c9-6daedf3d09a9\") " pod="openstack/ceilometer-0"
Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.936226 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/70301603-f005-4e2f-90c9-6daedf3d09a9-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"70301603-f005-4e2f-90c9-6daedf3d09a9\") " pod="openstack/ceilometer-0"
Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.944366 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-z2zwx"]
Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.960825 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fstp6\" (UniqueName: \"kubernetes.io/projected/70301603-f005-4e2f-90c9-6daedf3d09a9-kube-api-access-fstp6\") pod \"ceilometer-0\" (UID: \"70301603-f005-4e2f-90c9-6daedf3d09a9\") " pod="openstack/ceilometer-0"
Nov 28 11:30:40 crc kubenswrapper[4923]: I1128 11:30:40.973434 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/70301603-f005-4e2f-90c9-6daedf3d09a9-scripts\") pod \"ceilometer-0\" (UID: \"70301603-f005-4e2f-90c9-6daedf3d09a9\") " pod="openstack/ceilometer-0"
Nov 28 11:30:41 crc kubenswrapper[4923]: I1128 11:30:41.011398 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jhjql\" (UniqueName: \"kubernetes.io/projected/327ab34b-b6a6-4e7d-be85-933e09902a9a-kube-api-access-jhjql\") pod \"nova-cell1-cell-mapping-z2zwx\" (UID: \"327ab34b-b6a6-4e7d-be85-933e09902a9a\") " pod="openstack/nova-cell1-cell-mapping-z2zwx"
Nov 28 11:30:41 crc kubenswrapper[4923]: I1128 11:30:41.011450 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/327ab34b-b6a6-4e7d-be85-933e09902a9a-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-z2zwx\" (UID: \"327ab34b-b6a6-4e7d-be85-933e09902a9a\") " pod="openstack/nova-cell1-cell-mapping-z2zwx"
Nov 28 11:30:41 crc kubenswrapper[4923]: I1128 11:30:41.011546 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/327ab34b-b6a6-4e7d-be85-933e09902a9a-config-data\") pod \"nova-cell1-cell-mapping-z2zwx\" (UID: \"327ab34b-b6a6-4e7d-be85-933e09902a9a\") " pod="openstack/nova-cell1-cell-mapping-z2zwx"
Nov 28 11:30:41 crc kubenswrapper[4923]: I1128 11:30:41.011579 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/327ab34b-b6a6-4e7d-be85-933e09902a9a-scripts\") pod \"nova-cell1-cell-mapping-z2zwx\" (UID: \"327ab34b-b6a6-4e7d-be85-933e09902a9a\") " pod="openstack/nova-cell1-cell-mapping-z2zwx"
Nov 28 11:30:41 crc kubenswrapper[4923]: I1128 11:30:41.030768 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 28 11:30:41 crc kubenswrapper[4923]: I1128 11:30:41.119588 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/327ab34b-b6a6-4e7d-be85-933e09902a9a-config-data\") pod \"nova-cell1-cell-mapping-z2zwx\" (UID: \"327ab34b-b6a6-4e7d-be85-933e09902a9a\") " pod="openstack/nova-cell1-cell-mapping-z2zwx"
Nov 28 11:30:41 crc kubenswrapper[4923]: I1128 11:30:41.119642 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/327ab34b-b6a6-4e7d-be85-933e09902a9a-scripts\") pod \"nova-cell1-cell-mapping-z2zwx\" (UID: \"327ab34b-b6a6-4e7d-be85-933e09902a9a\") " pod="openstack/nova-cell1-cell-mapping-z2zwx"
Nov 28 11:30:41 crc kubenswrapper[4923]: I1128 11:30:41.119704 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jhjql\" (UniqueName: \"kubernetes.io/projected/327ab34b-b6a6-4e7d-be85-933e09902a9a-kube-api-access-jhjql\") pod \"nova-cell1-cell-mapping-z2zwx\" (UID: \"327ab34b-b6a6-4e7d-be85-933e09902a9a\") " pod="openstack/nova-cell1-cell-mapping-z2zwx"
Nov 28 11:30:41 crc kubenswrapper[4923]: I1128 11:30:41.119729 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/327ab34b-b6a6-4e7d-be85-933e09902a9a-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-z2zwx\" (UID: \"327ab34b-b6a6-4e7d-be85-933e09902a9a\") " pod="openstack/nova-cell1-cell-mapping-z2zwx"
Nov 28 11:30:41 crc kubenswrapper[4923]: I1128 11:30:41.124503 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/327ab34b-b6a6-4e7d-be85-933e09902a9a-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-z2zwx\" (UID: \"327ab34b-b6a6-4e7d-be85-933e09902a9a\") " pod="openstack/nova-cell1-cell-mapping-z2zwx"
Nov 28 11:30:41 crc kubenswrapper[4923]: I1128 11:30:41.129303 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts"
Nov 28 11:30:41 crc kubenswrapper[4923]: I1128 11:30:41.129657 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data"
Nov 28 11:30:41 crc kubenswrapper[4923]: I1128 11:30:41.136408 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/327ab34b-b6a6-4e7d-be85-933e09902a9a-config-data\") pod \"nova-cell1-cell-mapping-z2zwx\" (UID: \"327ab34b-b6a6-4e7d-be85-933e09902a9a\") " pod="openstack/nova-cell1-cell-mapping-z2zwx"
Nov 28 11:30:41 crc kubenswrapper[4923]: I1128 11:30:41.136513 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/327ab34b-b6a6-4e7d-be85-933e09902a9a-scripts\") pod \"nova-cell1-cell-mapping-z2zwx\" (UID: \"327ab34b-b6a6-4e7d-be85-933e09902a9a\") " pod="openstack/nova-cell1-cell-mapping-z2zwx"
Nov 28 11:30:41 crc kubenswrapper[4923]: I1128 11:30:41.144504 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jhjql\" (UniqueName: \"kubernetes.io/projected/327ab34b-b6a6-4e7d-be85-933e09902a9a-kube-api-access-jhjql\") pod \"nova-cell1-cell-mapping-z2zwx\" (UID: \"327ab34b-b6a6-4e7d-be85-933e09902a9a\") " pod="openstack/nova-cell1-cell-mapping-z2zwx"
Nov 28 11:30:41 crc kubenswrapper[4923]: I1128 11:30:41.188435 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c0cabae9-7237-43fb-b966-8f59d437c495" path="/var/lib/kubelet/pods/c0cabae9-7237-43fb-b966-8f59d437c495/volumes"
Nov 28 11:30:41 crc kubenswrapper[4923]: I1128 11:30:41.189080 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f1d9d17f-cc44-4cac-a8d3-b7217e4ace86" path="/var/lib/kubelet/pods/f1d9d17f-cc44-4cac-a8d3-b7217e4ace86/volumes"
Nov 28 11:30:41 crc kubenswrapper[4923]: I1128 11:30:41.347313 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-z2zwx"
Nov 28 11:30:41 crc kubenswrapper[4923]: I1128 11:30:41.557083 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 28 11:30:41 crc kubenswrapper[4923]: I1128 11:30:41.561060 4923 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 28 11:30:41 crc kubenswrapper[4923]: I1128 11:30:41.653515 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"d836f6d9-f12f-4fd2-b84d-3ace0ffa8463","Type":"ContainerStarted","Data":"a7cba8f96f099c3af89230827f8db9f68878b8e3171ce10cfd9945bc3e0be8c6"}
Nov 28 11:30:41 crc kubenswrapper[4923]: I1128 11:30:41.655673 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"70301603-f005-4e2f-90c9-6daedf3d09a9","Type":"ContainerStarted","Data":"ec3def95ef111d03f9add791be4951a45f03c4a0508b2249de6a93595503dba6"}
Nov 28 11:30:41 crc kubenswrapper[4923]: I1128 11:30:41.780168 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.7801529560000002 podStartE2EDuration="2.780152956s" podCreationTimestamp="2025-11-28 11:30:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:30:41.673713556 +0000 UTC m=+1320.802397766" watchObservedRunningTime="2025-11-28 11:30:41.780152956 +0000 UTC m=+1320.908837166"
Nov 28 11:30:41 crc kubenswrapper[4923]: I1128 11:30:41.784430 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-z2zwx"]
Nov 28 11:30:42 crc kubenswrapper[4923]: I1128 11:30:42.678960 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"70301603-f005-4e2f-90c9-6daedf3d09a9","Type":"ContainerStarted","Data":"4ff663b84de80780c6974705f58f5276259bdb55f1d112780b45e175830c8c7d"}
Nov 28 11:30:42 crc kubenswrapper[4923]: I1128 11:30:42.682896 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-z2zwx" event={"ID":"327ab34b-b6a6-4e7d-be85-933e09902a9a","Type":"ContainerStarted","Data":"c96f5654fa63d25dd422d3bae27f4ce71673d7e8b016ec7225a9b8070f0e5bf8"}
"SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-z2zwx" event={"ID":"327ab34b-b6a6-4e7d-be85-933e09902a9a","Type":"ContainerStarted","Data":"c96f5654fa63d25dd422d3bae27f4ce71673d7e8b016ec7225a9b8070f0e5bf8"} Nov 28 11:30:42 crc kubenswrapper[4923]: I1128 11:30:42.682966 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-z2zwx" event={"ID":"327ab34b-b6a6-4e7d-be85-933e09902a9a","Type":"ContainerStarted","Data":"bf8373a494df44be40186e2a4ba8091036c6ac51b9399c3cb2ed8b7ba05d699d"} Nov 28 11:30:42 crc kubenswrapper[4923]: I1128 11:30:42.707425 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-z2zwx" podStartSLOduration=2.707407623 podStartE2EDuration="2.707407623s" podCreationTimestamp="2025-11-28 11:30:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:30:42.704468561 +0000 UTC m=+1321.833152801" watchObservedRunningTime="2025-11-28 11:30:42.707407623 +0000 UTC m=+1321.836091833" Nov 28 11:30:43 crc kubenswrapper[4923]: I1128 11:30:43.158901 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5b856c5697-rqrzr" Nov 28 11:30:43 crc kubenswrapper[4923]: I1128 11:30:43.227301 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-566b5b7845-zr9q6"] Nov 28 11:30:43 crc kubenswrapper[4923]: I1128 11:30:43.227526 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-566b5b7845-zr9q6" podUID="82518235-c0d1-44e4-b4c9-811888c5d245" containerName="dnsmasq-dns" containerID="cri-o://34a1fdf8b8b78d9662d0775da5db012148144b78ea68b3bdbfdc243657f57f32" gracePeriod=10 Nov 28 11:30:43 crc kubenswrapper[4923]: I1128 11:30:43.695468 4923 generic.go:334] "Generic (PLEG): container finished" podID="82518235-c0d1-44e4-b4c9-811888c5d245" containerID="34a1fdf8b8b78d9662d0775da5db012148144b78ea68b3bdbfdc243657f57f32" exitCode=0 Nov 28 11:30:43 crc kubenswrapper[4923]: I1128 11:30:43.695959 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-566b5b7845-zr9q6" event={"ID":"82518235-c0d1-44e4-b4c9-811888c5d245","Type":"ContainerDied","Data":"34a1fdf8b8b78d9662d0775da5db012148144b78ea68b3bdbfdc243657f57f32"} Nov 28 11:30:43 crc kubenswrapper[4923]: I1128 11:30:43.697507 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"70301603-f005-4e2f-90c9-6daedf3d09a9","Type":"ContainerStarted","Data":"18b0a9683a64d5e81984758e11f1b0f8ae2b4beeb301a57a15e6949c3dc53403"} Nov 28 11:30:43 crc kubenswrapper[4923]: I1128 11:30:43.808159 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-566b5b7845-zr9q6" Nov 28 11:30:43 crc kubenswrapper[4923]: I1128 11:30:43.900774 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/82518235-c0d1-44e4-b4c9-811888c5d245-ovsdbserver-sb\") pod \"82518235-c0d1-44e4-b4c9-811888c5d245\" (UID: \"82518235-c0d1-44e4-b4c9-811888c5d245\") " Nov 28 11:30:43 crc kubenswrapper[4923]: I1128 11:30:43.900837 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/82518235-c0d1-44e4-b4c9-811888c5d245-ovsdbserver-nb\") pod \"82518235-c0d1-44e4-b4c9-811888c5d245\" (UID: \"82518235-c0d1-44e4-b4c9-811888c5d245\") " Nov 28 11:30:43 crc kubenswrapper[4923]: I1128 11:30:43.900884 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/82518235-c0d1-44e4-b4c9-811888c5d245-dns-svc\") pod \"82518235-c0d1-44e4-b4c9-811888c5d245\" (UID: \"82518235-c0d1-44e4-b4c9-811888c5d245\") " Nov 28 11:30:43 crc kubenswrapper[4923]: I1128 11:30:43.900920 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/82518235-c0d1-44e4-b4c9-811888c5d245-config\") pod \"82518235-c0d1-44e4-b4c9-811888c5d245\" (UID: \"82518235-c0d1-44e4-b4c9-811888c5d245\") " Nov 28 11:30:43 crc kubenswrapper[4923]: I1128 11:30:43.901024 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7kmr6\" (UniqueName: \"kubernetes.io/projected/82518235-c0d1-44e4-b4c9-811888c5d245-kube-api-access-7kmr6\") pod \"82518235-c0d1-44e4-b4c9-811888c5d245\" (UID: \"82518235-c0d1-44e4-b4c9-811888c5d245\") " Nov 28 11:30:43 crc kubenswrapper[4923]: I1128 11:30:43.909098 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/82518235-c0d1-44e4-b4c9-811888c5d245-kube-api-access-7kmr6" (OuterVolumeSpecName: "kube-api-access-7kmr6") pod "82518235-c0d1-44e4-b4c9-811888c5d245" (UID: "82518235-c0d1-44e4-b4c9-811888c5d245"). InnerVolumeSpecName "kube-api-access-7kmr6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:30:43 crc kubenswrapper[4923]: I1128 11:30:43.950817 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/82518235-c0d1-44e4-b4c9-811888c5d245-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "82518235-c0d1-44e4-b4c9-811888c5d245" (UID: "82518235-c0d1-44e4-b4c9-811888c5d245"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:30:43 crc kubenswrapper[4923]: I1128 11:30:43.954498 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/82518235-c0d1-44e4-b4c9-811888c5d245-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "82518235-c0d1-44e4-b4c9-811888c5d245" (UID: "82518235-c0d1-44e4-b4c9-811888c5d245"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:30:43 crc kubenswrapper[4923]: I1128 11:30:43.962025 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/82518235-c0d1-44e4-b4c9-811888c5d245-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "82518235-c0d1-44e4-b4c9-811888c5d245" (UID: "82518235-c0d1-44e4-b4c9-811888c5d245"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:30:43 crc kubenswrapper[4923]: I1128 11:30:43.974174 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/82518235-c0d1-44e4-b4c9-811888c5d245-config" (OuterVolumeSpecName: "config") pod "82518235-c0d1-44e4-b4c9-811888c5d245" (UID: "82518235-c0d1-44e4-b4c9-811888c5d245"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:30:44 crc kubenswrapper[4923]: I1128 11:30:44.003193 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7kmr6\" (UniqueName: \"kubernetes.io/projected/82518235-c0d1-44e4-b4c9-811888c5d245-kube-api-access-7kmr6\") on node \"crc\" DevicePath \"\"" Nov 28 11:30:44 crc kubenswrapper[4923]: I1128 11:30:44.003223 4923 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/82518235-c0d1-44e4-b4c9-811888c5d245-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 11:30:44 crc kubenswrapper[4923]: I1128 11:30:44.003234 4923 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/82518235-c0d1-44e4-b4c9-811888c5d245-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 11:30:44 crc kubenswrapper[4923]: I1128 11:30:44.003242 4923 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/82518235-c0d1-44e4-b4c9-811888c5d245-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 11:30:44 crc kubenswrapper[4923]: I1128 11:30:44.003251 4923 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/82518235-c0d1-44e4-b4c9-811888c5d245-config\") on node \"crc\" DevicePath \"\"" Nov 28 11:30:44 crc kubenswrapper[4923]: I1128 11:30:44.028487 4923 patch_prober.go:28] interesting pod/machine-config-daemon-bwdth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 11:30:44 crc kubenswrapper[4923]: I1128 11:30:44.028543 4923 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 11:30:44 crc kubenswrapper[4923]: I1128 11:30:44.708241 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"70301603-f005-4e2f-90c9-6daedf3d09a9","Type":"ContainerStarted","Data":"d517810ac58728d03a7eba3058339bb717a30ebc1870f814d316a6eb01aef352"} Nov 28 11:30:44 crc kubenswrapper[4923]: I1128 11:30:44.710051 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-566b5b7845-zr9q6" event={"ID":"82518235-c0d1-44e4-b4c9-811888c5d245","Type":"ContainerDied","Data":"60be3025fce828e7547e93c0d76b83cdf1bbea8e308ab992f96ca2e72eba9821"} Nov 28 11:30:44 crc kubenswrapper[4923]: I1128 11:30:44.710107 4923 util.go:48] "No ready sandbox for pod can be found. 
Nov 28 11:30:44 crc kubenswrapper[4923]: I1128 11:30:44.710117 4923 scope.go:117] "RemoveContainer" containerID="34a1fdf8b8b78d9662d0775da5db012148144b78ea68b3bdbfdc243657f57f32"
Nov 28 11:30:44 crc kubenswrapper[4923]: I1128 11:30:44.734323 4923 scope.go:117] "RemoveContainer" containerID="13889e08b7cfb38a7844bae03d39123ebb3de645e328cd04528dbb09a10f0eb0"
Nov 28 11:30:44 crc kubenswrapper[4923]: I1128 11:30:44.753068 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-566b5b7845-zr9q6"]
Nov 28 11:30:44 crc kubenswrapper[4923]: I1128 11:30:44.753464 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-566b5b7845-zr9q6"]
Nov 28 11:30:45 crc kubenswrapper[4923]: I1128 11:30:45.182823 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="82518235-c0d1-44e4-b4c9-811888c5d245" path="/var/lib/kubelet/pods/82518235-c0d1-44e4-b4c9-811888c5d245/volumes"
Nov 28 11:30:46 crc kubenswrapper[4923]: I1128 11:30:46.736563 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"70301603-f005-4e2f-90c9-6daedf3d09a9","Type":"ContainerStarted","Data":"021257d6ee38d71e68835142f32fd993621c3c77a18cd078f03717ea2b9365c5"}
Nov 28 11:30:46 crc kubenswrapper[4923]: I1128 11:30:46.738009 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Nov 28 11:30:46 crc kubenswrapper[4923]: I1128 11:30:46.762231 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.305037979 podStartE2EDuration="6.762217438s" podCreationTimestamp="2025-11-28 11:30:40 +0000 UTC" firstStartedPulling="2025-11-28 11:30:41.560672331 +0000 UTC m=+1320.689356551" lastFinishedPulling="2025-11-28 11:30:46.01785177 +0000 UTC m=+1325.146536010" observedRunningTime="2025-11-28 11:30:46.755808778 +0000 UTC m=+1325.884492988" watchObservedRunningTime="2025-11-28 11:30:46.762217438 +0000 UTC m=+1325.890901648"
Nov 28 11:30:47 crc kubenswrapper[4923]: I1128 11:30:47.745737 4923 generic.go:334] "Generic (PLEG): container finished" podID="327ab34b-b6a6-4e7d-be85-933e09902a9a" containerID="c96f5654fa63d25dd422d3bae27f4ce71673d7e8b016ec7225a9b8070f0e5bf8" exitCode=0
Nov 28 11:30:47 crc kubenswrapper[4923]: I1128 11:30:47.745983 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-z2zwx" event={"ID":"327ab34b-b6a6-4e7d-be85-933e09902a9a","Type":"ContainerDied","Data":"c96f5654fa63d25dd422d3bae27f4ce71673d7e8b016ec7225a9b8070f0e5bf8"}
Nov 28 11:30:48 crc kubenswrapper[4923]: I1128 11:30:48.593789 4923 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-566b5b7845-zr9q6" podUID="82518235-c0d1-44e4-b4c9-811888c5d245" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.170:5353: i/o timeout"
Nov 28 11:30:49 crc kubenswrapper[4923]: I1128 11:30:49.150143 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-z2zwx"
Nov 28 11:30:49 crc kubenswrapper[4923]: I1128 11:30:49.344826 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/327ab34b-b6a6-4e7d-be85-933e09902a9a-scripts\") pod \"327ab34b-b6a6-4e7d-be85-933e09902a9a\" (UID: \"327ab34b-b6a6-4e7d-be85-933e09902a9a\") "
Nov 28 11:30:49 crc kubenswrapper[4923]: I1128 11:30:49.345062 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhjql\" (UniqueName: \"kubernetes.io/projected/327ab34b-b6a6-4e7d-be85-933e09902a9a-kube-api-access-jhjql\") pod \"327ab34b-b6a6-4e7d-be85-933e09902a9a\" (UID: \"327ab34b-b6a6-4e7d-be85-933e09902a9a\") "
Nov 28 11:30:49 crc kubenswrapper[4923]: I1128 11:30:49.345219 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/327ab34b-b6a6-4e7d-be85-933e09902a9a-config-data\") pod \"327ab34b-b6a6-4e7d-be85-933e09902a9a\" (UID: \"327ab34b-b6a6-4e7d-be85-933e09902a9a\") "
Nov 28 11:30:49 crc kubenswrapper[4923]: I1128 11:30:49.345268 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/327ab34b-b6a6-4e7d-be85-933e09902a9a-combined-ca-bundle\") pod \"327ab34b-b6a6-4e7d-be85-933e09902a9a\" (UID: \"327ab34b-b6a6-4e7d-be85-933e09902a9a\") "
Nov 28 11:30:49 crc kubenswrapper[4923]: I1128 11:30:49.349964 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/327ab34b-b6a6-4e7d-be85-933e09902a9a-kube-api-access-jhjql" (OuterVolumeSpecName: "kube-api-access-jhjql") pod "327ab34b-b6a6-4e7d-be85-933e09902a9a" (UID: "327ab34b-b6a6-4e7d-be85-933e09902a9a"). InnerVolumeSpecName "kube-api-access-jhjql". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 11:30:49 crc kubenswrapper[4923]: I1128 11:30:49.350992 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/327ab34b-b6a6-4e7d-be85-933e09902a9a-scripts" (OuterVolumeSpecName: "scripts") pod "327ab34b-b6a6-4e7d-be85-933e09902a9a" (UID: "327ab34b-b6a6-4e7d-be85-933e09902a9a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 11:30:49 crc kubenswrapper[4923]: I1128 11:30:49.380099 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/327ab34b-b6a6-4e7d-be85-933e09902a9a-config-data" (OuterVolumeSpecName: "config-data") pod "327ab34b-b6a6-4e7d-be85-933e09902a9a" (UID: "327ab34b-b6a6-4e7d-be85-933e09902a9a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 11:30:49 crc kubenswrapper[4923]: I1128 11:30:49.386269 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/327ab34b-b6a6-4e7d-be85-933e09902a9a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "327ab34b-b6a6-4e7d-be85-933e09902a9a" (UID: "327ab34b-b6a6-4e7d-be85-933e09902a9a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 11:30:49 crc kubenswrapper[4923]: I1128 11:30:49.447608 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhjql\" (UniqueName: \"kubernetes.io/projected/327ab34b-b6a6-4e7d-be85-933e09902a9a-kube-api-access-jhjql\") on node \"crc\" DevicePath \"\""
Nov 28 11:30:49 crc kubenswrapper[4923]: I1128 11:30:49.447651 4923 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/327ab34b-b6a6-4e7d-be85-933e09902a9a-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 11:30:49 crc kubenswrapper[4923]: I1128 11:30:49.447664 4923 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/327ab34b-b6a6-4e7d-be85-933e09902a9a-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 11:30:49 crc kubenswrapper[4923]: I1128 11:30:49.447677 4923 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/327ab34b-b6a6-4e7d-be85-933e09902a9a-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 11:30:49 crc kubenswrapper[4923]: I1128 11:30:49.769294 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-z2zwx" event={"ID":"327ab34b-b6a6-4e7d-be85-933e09902a9a","Type":"ContainerDied","Data":"bf8373a494df44be40186e2a4ba8091036c6ac51b9399c3cb2ed8b7ba05d699d"}
Nov 28 11:30:49 crc kubenswrapper[4923]: I1128 11:30:49.769807 4923 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bf8373a494df44be40186e2a4ba8091036c6ac51b9399c3cb2ed8b7ba05d699d"
Nov 28 11:30:49 crc kubenswrapper[4923]: I1128 11:30:49.769427 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-z2zwx"
Nov 28 11:30:50 crc kubenswrapper[4923]: I1128 11:30:50.045464 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Nov 28 11:30:50 crc kubenswrapper[4923]: I1128 11:30:50.048248 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Nov 28 11:30:50 crc kubenswrapper[4923]: I1128 11:30:50.080975 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Nov 28 11:30:50 crc kubenswrapper[4923]: I1128 11:30:50.094891 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 28 11:30:50 crc kubenswrapper[4923]: I1128 11:30:50.095162 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="e122279d-7bf5-41e7-b4c7-6c2edbdbe508" containerName="nova-scheduler-scheduler" containerID="cri-o://ab9b856988843d8eb2836c4eb74a23637769b564e01892f542c17fa625c03c19" gracePeriod=30
Nov 28 11:30:50 crc kubenswrapper[4923]: I1128 11:30:50.122386 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"]
Nov 28 11:30:50 crc kubenswrapper[4923]: I1128 11:30:50.122649 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="57d1e985-8768-421e-910e-a65c632dd5d3" containerName="nova-metadata-log" containerID="cri-o://7f54048d653272cb4fc96fe294624a60df9fc6909fa83caa550835186270ba20" gracePeriod=30
Nov 28 11:30:50 crc kubenswrapper[4923]: I1128 11:30:50.122796 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="57d1e985-8768-421e-910e-a65c632dd5d3" containerName="nova-metadata-metadata" containerID="cri-o://c30246725eaff2c585e36ce51e3b0eab9975a8a35f7258d575e8082ab1ae35d7" gracePeriod=30
Nov 28 11:30:50 crc kubenswrapper[4923]: I1128 11:30:50.778245 4923 generic.go:334] "Generic (PLEG): container finished" podID="57d1e985-8768-421e-910e-a65c632dd5d3" containerID="7f54048d653272cb4fc96fe294624a60df9fc6909fa83caa550835186270ba20" exitCode=143
Nov 28 11:30:50 crc kubenswrapper[4923]: I1128 11:30:50.779026 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"57d1e985-8768-421e-910e-a65c632dd5d3","Type":"ContainerDied","Data":"7f54048d653272cb4fc96fe294624a60df9fc6909fa83caa550835186270ba20"}
Nov 28 11:30:51 crc kubenswrapper[4923]: I1128 11:30:51.063100 4923 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="d836f6d9-f12f-4fd2-b84d-3ace0ffa8463" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.182:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Nov 28 11:30:51 crc kubenswrapper[4923]: I1128 11:30:51.063551 4923 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="d836f6d9-f12f-4fd2-b84d-3ace0ffa8463" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.182:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Nov 28 11:30:51 crc kubenswrapper[4923]: I1128 11:30:51.785694 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="d836f6d9-f12f-4fd2-b84d-3ace0ffa8463" containerName="nova-api-log" containerID="cri-o://33a42edcf28b8e3567051f03ea9a0c4b0ab5953978066fab25862928e8a5921e" gracePeriod=30
Nov 28 11:30:51 crc kubenswrapper[4923]: I1128 11:30:51.785725 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="d836f6d9-f12f-4fd2-b84d-3ace0ffa8463" containerName="nova-api-api" containerID="cri-o://a7cba8f96f099c3af89230827f8db9f68878b8e3171ce10cfd9945bc3e0be8c6" gracePeriod=30
Nov 28 11:30:52 crc kubenswrapper[4923]: I1128 11:30:52.797320 4923 generic.go:334] "Generic (PLEG): container finished" podID="d836f6d9-f12f-4fd2-b84d-3ace0ffa8463" containerID="33a42edcf28b8e3567051f03ea9a0c4b0ab5953978066fab25862928e8a5921e" exitCode=143
Nov 28 11:30:52 crc kubenswrapper[4923]: I1128 11:30:52.797397 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"d836f6d9-f12f-4fd2-b84d-3ace0ffa8463","Type":"ContainerDied","Data":"33a42edcf28b8e3567051f03ea9a0c4b0ab5953978066fab25862928e8a5921e"}
Nov 28 11:30:53 crc kubenswrapper[4923]: I1128 11:30:53.279197 4923 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="57d1e985-8768-421e-910e-a65c632dd5d3" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.176:8775/\": read tcp 10.217.0.2:43504->10.217.0.176:8775: read: connection reset by peer"
Nov 28 11:30:53 crc kubenswrapper[4923]: I1128 11:30:53.279373 4923 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="57d1e985-8768-421e-910e-a65c632dd5d3" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.176:8775/\": read tcp 10.217.0.2:43512->10.217.0.176:8775: read: connection reset by peer"
Nov 28 11:30:53 crc kubenswrapper[4923]: E1128 11:30:53.436395 4923 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod57d1e985_8768_421e_910e_a65c632dd5d3.slice/crio-c30246725eaff2c585e36ce51e3b0eab9975a8a35f7258d575e8082ab1ae35d7.scope\": RecentStats: unable to find data in memory cache]"
Nov 28 11:30:53 crc kubenswrapper[4923]: I1128 11:30:53.728917 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Nov 28 11:30:53 crc kubenswrapper[4923]: I1128 11:30:53.811527 4923 generic.go:334] "Generic (PLEG): container finished" podID="57d1e985-8768-421e-910e-a65c632dd5d3" containerID="c30246725eaff2c585e36ce51e3b0eab9975a8a35f7258d575e8082ab1ae35d7" exitCode=0
Nov 28 11:30:53 crc kubenswrapper[4923]: I1128 11:30:53.811610 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"57d1e985-8768-421e-910e-a65c632dd5d3","Type":"ContainerDied","Data":"c30246725eaff2c585e36ce51e3b0eab9975a8a35f7258d575e8082ab1ae35d7"}
Nov 28 11:30:53 crc kubenswrapper[4923]: I1128 11:30:53.813738 4923 generic.go:334] "Generic (PLEG): container finished" podID="e122279d-7bf5-41e7-b4c7-6c2edbdbe508" containerID="ab9b856988843d8eb2836c4eb74a23637769b564e01892f542c17fa625c03c19" exitCode=0
Nov 28 11:30:53 crc kubenswrapper[4923]: I1128 11:30:53.813778 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"e122279d-7bf5-41e7-b4c7-6c2edbdbe508","Type":"ContainerDied","Data":"ab9b856988843d8eb2836c4eb74a23637769b564e01892f542c17fa625c03c19"}
Nov 28 11:30:53 crc kubenswrapper[4923]: I1128 11:30:53.813786 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Nov 28 11:30:53 crc kubenswrapper[4923]: I1128 11:30:53.813805 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"e122279d-7bf5-41e7-b4c7-6c2edbdbe508","Type":"ContainerDied","Data":"8b686b1127a84d66631038f074163c744370a5b9c5f15b9c0b8e01ee1d9e36b6"}
Nov 28 11:30:53 crc kubenswrapper[4923]: I1128 11:30:53.813834 4923 scope.go:117] "RemoveContainer" containerID="ab9b856988843d8eb2836c4eb74a23637769b564e01892f542c17fa625c03c19"
Nov 28 11:30:53 crc kubenswrapper[4923]: I1128 11:30:53.842916 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e122279d-7bf5-41e7-b4c7-6c2edbdbe508-config-data\") pod \"e122279d-7bf5-41e7-b4c7-6c2edbdbe508\" (UID: \"e122279d-7bf5-41e7-b4c7-6c2edbdbe508\") "
Nov 28 11:30:53 crc kubenswrapper[4923]: I1128 11:30:53.843002 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e122279d-7bf5-41e7-b4c7-6c2edbdbe508-combined-ca-bundle\") pod \"e122279d-7bf5-41e7-b4c7-6c2edbdbe508\" (UID: \"e122279d-7bf5-41e7-b4c7-6c2edbdbe508\") "
Nov 28 11:30:53 crc kubenswrapper[4923]: I1128 11:30:53.843126 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x6gw4\" (UniqueName: \"kubernetes.io/projected/e122279d-7bf5-41e7-b4c7-6c2edbdbe508-kube-api-access-x6gw4\") pod \"e122279d-7bf5-41e7-b4c7-6c2edbdbe508\" (UID: \"e122279d-7bf5-41e7-b4c7-6c2edbdbe508\") "
Nov 28 11:30:53 crc kubenswrapper[4923]: I1128 11:30:53.852753 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e122279d-7bf5-41e7-b4c7-6c2edbdbe508-kube-api-access-x6gw4" (OuterVolumeSpecName: "kube-api-access-x6gw4") pod "e122279d-7bf5-41e7-b4c7-6c2edbdbe508" (UID: "e122279d-7bf5-41e7-b4c7-6c2edbdbe508"). InnerVolumeSpecName "kube-api-access-x6gw4". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 11:30:53 crc kubenswrapper[4923]: I1128 11:30:53.853566 4923 scope.go:117] "RemoveContainer" containerID="ab9b856988843d8eb2836c4eb74a23637769b564e01892f542c17fa625c03c19"
Nov 28 11:30:53 crc kubenswrapper[4923]: E1128 11:30:53.854123 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ab9b856988843d8eb2836c4eb74a23637769b564e01892f542c17fa625c03c19\": container with ID starting with ab9b856988843d8eb2836c4eb74a23637769b564e01892f542c17fa625c03c19 not found: ID does not exist" containerID="ab9b856988843d8eb2836c4eb74a23637769b564e01892f542c17fa625c03c19"
Nov 28 11:30:53 crc kubenswrapper[4923]: I1128 11:30:53.854148 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ab9b856988843d8eb2836c4eb74a23637769b564e01892f542c17fa625c03c19"} err="failed to get container status \"ab9b856988843d8eb2836c4eb74a23637769b564e01892f542c17fa625c03c19\": rpc error: code = NotFound desc = could not find container \"ab9b856988843d8eb2836c4eb74a23637769b564e01892f542c17fa625c03c19\": container with ID starting with ab9b856988843d8eb2836c4eb74a23637769b564e01892f542c17fa625c03c19 not found: ID does not exist"
Nov 28 11:30:53 crc kubenswrapper[4923]: I1128 11:30:53.894783 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e122279d-7bf5-41e7-b4c7-6c2edbdbe508-config-data" (OuterVolumeSpecName: "config-data") pod "e122279d-7bf5-41e7-b4c7-6c2edbdbe508" (UID: "e122279d-7bf5-41e7-b4c7-6c2edbdbe508"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 11:30:53 crc kubenswrapper[4923]: I1128 11:30:53.899115 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e122279d-7bf5-41e7-b4c7-6c2edbdbe508-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e122279d-7bf5-41e7-b4c7-6c2edbdbe508" (UID: "e122279d-7bf5-41e7-b4c7-6c2edbdbe508"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 11:30:53 crc kubenswrapper[4923]: I1128 11:30:53.946301 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x6gw4\" (UniqueName: \"kubernetes.io/projected/e122279d-7bf5-41e7-b4c7-6c2edbdbe508-kube-api-access-x6gw4\") on node \"crc\" DevicePath \"\""
Nov 28 11:30:53 crc kubenswrapper[4923]: I1128 11:30:53.946344 4923 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e122279d-7bf5-41e7-b4c7-6c2edbdbe508-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 11:30:53 crc kubenswrapper[4923]: I1128 11:30:53.946357 4923 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e122279d-7bf5-41e7-b4c7-6c2edbdbe508-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 11:30:54 crc kubenswrapper[4923]: I1128 11:30:54.172132 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 28 11:30:54 crc kubenswrapper[4923]: I1128 11:30:54.194601 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 28 11:30:54 crc kubenswrapper[4923]: I1128 11:30:54.217977 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"]
Nov 28 11:30:54 crc kubenswrapper[4923]: E1128 11:30:54.218304 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="327ab34b-b6a6-4e7d-be85-933e09902a9a" containerName="nova-manage"
Nov 28 11:30:54 crc kubenswrapper[4923]: I1128 11:30:54.218321 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="327ab34b-b6a6-4e7d-be85-933e09902a9a" containerName="nova-manage"
Nov 28 11:30:54 crc kubenswrapper[4923]: E1128 11:30:54.218336 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="82518235-c0d1-44e4-b4c9-811888c5d245" containerName="dnsmasq-dns"
Nov 28 11:30:54 crc kubenswrapper[4923]: I1128 11:30:54.218342 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="82518235-c0d1-44e4-b4c9-811888c5d245" containerName="dnsmasq-dns"
Nov 28 11:30:54 crc kubenswrapper[4923]: E1128 11:30:54.218351 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e122279d-7bf5-41e7-b4c7-6c2edbdbe508" containerName="nova-scheduler-scheduler"
Nov 28 11:30:54 crc kubenswrapper[4923]: I1128 11:30:54.218357 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="e122279d-7bf5-41e7-b4c7-6c2edbdbe508" containerName="nova-scheduler-scheduler"
Nov 28 11:30:54 crc kubenswrapper[4923]: E1128 11:30:54.218374 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="82518235-c0d1-44e4-b4c9-811888c5d245" containerName="init"
Nov 28 11:30:54 crc kubenswrapper[4923]: I1128 11:30:54.218380 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="82518235-c0d1-44e4-b4c9-811888c5d245" containerName="init"
Nov 28 11:30:54 crc kubenswrapper[4923]: I1128 11:30:54.235985 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="327ab34b-b6a6-4e7d-be85-933e09902a9a" containerName="nova-manage"
Nov 28 11:30:54 crc kubenswrapper[4923]: I1128 11:30:54.236040 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="82518235-c0d1-44e4-b4c9-811888c5d245" containerName="dnsmasq-dns"
Nov 28 11:30:54 crc kubenswrapper[4923]: I1128 11:30:54.236074 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="e122279d-7bf5-41e7-b4c7-6c2edbdbe508" containerName="nova-scheduler-scheduler"
Nov 28 11:30:54 crc kubenswrapper[4923]: I1128 11:30:54.237713 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 28 11:30:54 crc kubenswrapper[4923]: I1128 11:30:54.237812 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Nov 28 11:30:54 crc kubenswrapper[4923]: I1128 11:30:54.244069 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data"
Nov 28 11:30:54 crc kubenswrapper[4923]: I1128 11:30:54.265455 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Nov 28 11:30:54 crc kubenswrapper[4923]: I1128 11:30:54.357545 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/57d1e985-8768-421e-910e-a65c632dd5d3-nova-metadata-tls-certs\") pod \"57d1e985-8768-421e-910e-a65c632dd5d3\" (UID: \"57d1e985-8768-421e-910e-a65c632dd5d3\") "
Nov 28 11:30:54 crc kubenswrapper[4923]: I1128 11:30:54.357607 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xwpdp\" (UniqueName: \"kubernetes.io/projected/57d1e985-8768-421e-910e-a65c632dd5d3-kube-api-access-xwpdp\") pod \"57d1e985-8768-421e-910e-a65c632dd5d3\" (UID: \"57d1e985-8768-421e-910e-a65c632dd5d3\") "
Nov 28 11:30:54 crc kubenswrapper[4923]: I1128 11:30:54.357642 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/57d1e985-8768-421e-910e-a65c632dd5d3-logs\") pod \"57d1e985-8768-421e-910e-a65c632dd5d3\" (UID: \"57d1e985-8768-421e-910e-a65c632dd5d3\") "
Nov 28 11:30:54 crc kubenswrapper[4923]: I1128 11:30:54.357695 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57d1e985-8768-421e-910e-a65c632dd5d3-combined-ca-bundle\") pod \"57d1e985-8768-421e-910e-a65c632dd5d3\" (UID: \"57d1e985-8768-421e-910e-a65c632dd5d3\") "
Nov 28 11:30:54 crc kubenswrapper[4923]: I1128 11:30:54.357773 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/57d1e985-8768-421e-910e-a65c632dd5d3-config-data\") pod \"57d1e985-8768-421e-910e-a65c632dd5d3\" (UID: \"57d1e985-8768-421e-910e-a65c632dd5d3\") "
Nov 28 11:30:54 crc kubenswrapper[4923]: I1128 11:30:54.358058 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57d1e985-8768-421e-910e-a65c632dd5d3-logs" (OuterVolumeSpecName: "logs") pod "57d1e985-8768-421e-910e-a65c632dd5d3" (UID: "57d1e985-8768-421e-910e-a65c632dd5d3"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 11:30:54 crc kubenswrapper[4923]: I1128 11:30:54.358068 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9ae9e9bf-102d-4abe-8adc-6720d53d5ebf-config-data\") pod \"nova-scheduler-0\" (UID: \"9ae9e9bf-102d-4abe-8adc-6720d53d5ebf\") " pod="openstack/nova-scheduler-0"
Nov 28 11:30:54 crc kubenswrapper[4923]: I1128 11:30:54.358249 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9ae9e9bf-102d-4abe-8adc-6720d53d5ebf-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"9ae9e9bf-102d-4abe-8adc-6720d53d5ebf\") " pod="openstack/nova-scheduler-0"
Nov 28 11:30:54 crc kubenswrapper[4923]: I1128 11:30:54.358379 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gnnvf\" (UniqueName: \"kubernetes.io/projected/9ae9e9bf-102d-4abe-8adc-6720d53d5ebf-kube-api-access-gnnvf\") pod \"nova-scheduler-0\" (UID: \"9ae9e9bf-102d-4abe-8adc-6720d53d5ebf\") " pod="openstack/nova-scheduler-0"
Nov 28 11:30:54 crc kubenswrapper[4923]: I1128 11:30:54.358570 4923 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/57d1e985-8768-421e-910e-a65c632dd5d3-logs\") on node \"crc\" DevicePath \"\""
Nov 28 11:30:54 crc kubenswrapper[4923]: I1128 11:30:54.362774 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57d1e985-8768-421e-910e-a65c632dd5d3-kube-api-access-xwpdp" (OuterVolumeSpecName: "kube-api-access-xwpdp") pod "57d1e985-8768-421e-910e-a65c632dd5d3" (UID: "57d1e985-8768-421e-910e-a65c632dd5d3"). InnerVolumeSpecName "kube-api-access-xwpdp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 11:30:54 crc kubenswrapper[4923]: I1128 11:30:54.398097 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/57d1e985-8768-421e-910e-a65c632dd5d3-config-data" (OuterVolumeSpecName: "config-data") pod "57d1e985-8768-421e-910e-a65c632dd5d3" (UID: "57d1e985-8768-421e-910e-a65c632dd5d3"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 11:30:54 crc kubenswrapper[4923]: I1128 11:30:54.406096 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/57d1e985-8768-421e-910e-a65c632dd5d3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "57d1e985-8768-421e-910e-a65c632dd5d3" (UID: "57d1e985-8768-421e-910e-a65c632dd5d3"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 11:30:54 crc kubenswrapper[4923]: I1128 11:30:54.435912 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/57d1e985-8768-421e-910e-a65c632dd5d3-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "57d1e985-8768-421e-910e-a65c632dd5d3" (UID: "57d1e985-8768-421e-910e-a65c632dd5d3"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 11:30:54 crc kubenswrapper[4923]: I1128 11:30:54.459970 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9ae9e9bf-102d-4abe-8adc-6720d53d5ebf-config-data\") pod \"nova-scheduler-0\" (UID: \"9ae9e9bf-102d-4abe-8adc-6720d53d5ebf\") " pod="openstack/nova-scheduler-0"
Nov 28 11:30:54 crc kubenswrapper[4923]: I1128 11:30:54.460043 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9ae9e9bf-102d-4abe-8adc-6720d53d5ebf-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"9ae9e9bf-102d-4abe-8adc-6720d53d5ebf\") " pod="openstack/nova-scheduler-0"
Nov 28 11:30:54 crc kubenswrapper[4923]: I1128 11:30:54.460083 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gnnvf\" (UniqueName: \"kubernetes.io/projected/9ae9e9bf-102d-4abe-8adc-6720d53d5ebf-kube-api-access-gnnvf\") pod \"nova-scheduler-0\" (UID: \"9ae9e9bf-102d-4abe-8adc-6720d53d5ebf\") " pod="openstack/nova-scheduler-0"
Nov 28 11:30:54 crc kubenswrapper[4923]: I1128 11:30:54.460165 4923 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/57d1e985-8768-421e-910e-a65c632dd5d3-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 28 11:30:54 crc kubenswrapper[4923]: I1128 11:30:54.460177 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xwpdp\" (UniqueName: \"kubernetes.io/projected/57d1e985-8768-421e-910e-a65c632dd5d3-kube-api-access-xwpdp\") on node \"crc\" DevicePath \"\""
Nov 28 11:30:54 crc kubenswrapper[4923]: I1128 11:30:54.460187 4923 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57d1e985-8768-421e-910e-a65c632dd5d3-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 11:30:54 crc kubenswrapper[4923]: I1128 11:30:54.460195 4923 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/57d1e985-8768-421e-910e-a65c632dd5d3-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 11:30:54 crc kubenswrapper[4923]: I1128 11:30:54.474736 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9ae9e9bf-102d-4abe-8adc-6720d53d5ebf-config-data\") pod \"nova-scheduler-0\" (UID: \"9ae9e9bf-102d-4abe-8adc-6720d53d5ebf\") " pod="openstack/nova-scheduler-0"
Nov 28 11:30:54 crc kubenswrapper[4923]: I1128 11:30:54.475997 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9ae9e9bf-102d-4abe-8adc-6720d53d5ebf-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"9ae9e9bf-102d-4abe-8adc-6720d53d5ebf\") " pod="openstack/nova-scheduler-0"
Nov 28 11:30:54 crc kubenswrapper[4923]: I1128 11:30:54.485661 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gnnvf\" (UniqueName: \"kubernetes.io/projected/9ae9e9bf-102d-4abe-8adc-6720d53d5ebf-kube-api-access-gnnvf\") pod \"nova-scheduler-0\" (UID: \"9ae9e9bf-102d-4abe-8adc-6720d53d5ebf\") " pod="openstack/nova-scheduler-0"
Nov 28 11:30:54 crc kubenswrapper[4923]: I1128 11:30:54.562078 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Nov 28 11:30:54 crc kubenswrapper[4923]: I1128 11:30:54.823735 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"57d1e985-8768-421e-910e-a65c632dd5d3","Type":"ContainerDied","Data":"eafeebf16f656652224a40f97fa6cf55240cf4223528e443d2f6279201f3d1ca"}
Nov 28 11:30:54 crc kubenswrapper[4923]: I1128 11:30:54.824033 4923 scope.go:117] "RemoveContainer" containerID="c30246725eaff2c585e36ce51e3b0eab9975a8a35f7258d575e8082ab1ae35d7"
Nov 28 11:30:54 crc kubenswrapper[4923]: I1128 11:30:54.823801 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Nov 28 11:30:54 crc kubenswrapper[4923]: I1128 11:30:54.873195 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"]
Nov 28 11:30:54 crc kubenswrapper[4923]: I1128 11:30:54.884130 4923 scope.go:117] "RemoveContainer" containerID="7f54048d653272cb4fc96fe294624a60df9fc6909fa83caa550835186270ba20"
Nov 28 11:30:54 crc kubenswrapper[4923]: I1128 11:30:54.913505 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"]
Nov 28 11:30:54 crc kubenswrapper[4923]: I1128 11:30:54.927005 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"]
Nov 28 11:30:54 crc kubenswrapper[4923]: E1128 11:30:54.927430 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="57d1e985-8768-421e-910e-a65c632dd5d3" containerName="nova-metadata-metadata"
Nov 28 11:30:54 crc kubenswrapper[4923]: I1128 11:30:54.927443 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="57d1e985-8768-421e-910e-a65c632dd5d3" containerName="nova-metadata-metadata"
Nov 28 11:30:54 crc kubenswrapper[4923]: E1128 11:30:54.927476 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="57d1e985-8768-421e-910e-a65c632dd5d3" containerName="nova-metadata-log"
Nov 28 11:30:54 crc kubenswrapper[4923]: I1128 11:30:54.927482 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="57d1e985-8768-421e-910e-a65c632dd5d3" containerName="nova-metadata-log"
Nov 28 11:30:54 crc kubenswrapper[4923]: I1128 11:30:54.927649 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="57d1e985-8768-421e-910e-a65c632dd5d3" containerName="nova-metadata-metadata"
Nov 28 11:30:54 crc kubenswrapper[4923]: I1128 11:30:54.927666 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="57d1e985-8768-421e-910e-a65c632dd5d3" containerName="nova-metadata-log"
Nov 28 11:30:54 crc kubenswrapper[4923]: I1128 11:30:54.928608 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 11:30:54 crc kubenswrapper[4923]: I1128 11:30:54.931278 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 28 11:30:54 crc kubenswrapper[4923]: I1128 11:30:54.931632 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 28 11:30:54 crc kubenswrapper[4923]: I1128 11:30:54.948675 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 11:30:55 crc kubenswrapper[4923]: I1128 11:30:55.069083 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8128758e-04b7-4c2c-abdc-c8c024262381-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"8128758e-04b7-4c2c-abdc-c8c024262381\") " pod="openstack/nova-metadata-0" Nov 28 11:30:55 crc kubenswrapper[4923]: I1128 11:30:55.069835 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8128758e-04b7-4c2c-abdc-c8c024262381-config-data\") pod \"nova-metadata-0\" (UID: \"8128758e-04b7-4c2c-abdc-c8c024262381\") " pod="openstack/nova-metadata-0" Nov 28 11:30:55 crc kubenswrapper[4923]: I1128 11:30:55.069918 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/8128758e-04b7-4c2c-abdc-c8c024262381-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"8128758e-04b7-4c2c-abdc-c8c024262381\") " pod="openstack/nova-metadata-0" Nov 28 11:30:55 crc kubenswrapper[4923]: I1128 11:30:55.070014 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8128758e-04b7-4c2c-abdc-c8c024262381-logs\") pod \"nova-metadata-0\" (UID: \"8128758e-04b7-4c2c-abdc-c8c024262381\") " pod="openstack/nova-metadata-0" Nov 28 11:30:55 crc kubenswrapper[4923]: I1128 11:30:55.070083 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2xgcd\" (UniqueName: \"kubernetes.io/projected/8128758e-04b7-4c2c-abdc-c8c024262381-kube-api-access-2xgcd\") pod \"nova-metadata-0\" (UID: \"8128758e-04b7-4c2c-abdc-c8c024262381\") " pod="openstack/nova-metadata-0" Nov 28 11:30:55 crc kubenswrapper[4923]: I1128 11:30:55.099435 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 11:30:55 crc kubenswrapper[4923]: I1128 11:30:55.171797 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/8128758e-04b7-4c2c-abdc-c8c024262381-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"8128758e-04b7-4c2c-abdc-c8c024262381\") " pod="openstack/nova-metadata-0" Nov 28 11:30:55 crc kubenswrapper[4923]: I1128 11:30:55.171837 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8128758e-04b7-4c2c-abdc-c8c024262381-logs\") pod \"nova-metadata-0\" (UID: \"8128758e-04b7-4c2c-abdc-c8c024262381\") " pod="openstack/nova-metadata-0" Nov 28 11:30:55 crc kubenswrapper[4923]: I1128 11:30:55.171864 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2xgcd\" (UniqueName: 
\"kubernetes.io/projected/8128758e-04b7-4c2c-abdc-c8c024262381-kube-api-access-2xgcd\") pod \"nova-metadata-0\" (UID: \"8128758e-04b7-4c2c-abdc-c8c024262381\") " pod="openstack/nova-metadata-0" Nov 28 11:30:55 crc kubenswrapper[4923]: I1128 11:30:55.171993 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8128758e-04b7-4c2c-abdc-c8c024262381-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"8128758e-04b7-4c2c-abdc-c8c024262381\") " pod="openstack/nova-metadata-0" Nov 28 11:30:55 crc kubenswrapper[4923]: I1128 11:30:55.172052 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8128758e-04b7-4c2c-abdc-c8c024262381-config-data\") pod \"nova-metadata-0\" (UID: \"8128758e-04b7-4c2c-abdc-c8c024262381\") " pod="openstack/nova-metadata-0" Nov 28 11:30:55 crc kubenswrapper[4923]: I1128 11:30:55.174222 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8128758e-04b7-4c2c-abdc-c8c024262381-logs\") pod \"nova-metadata-0\" (UID: \"8128758e-04b7-4c2c-abdc-c8c024262381\") " pod="openstack/nova-metadata-0" Nov 28 11:30:55 crc kubenswrapper[4923]: I1128 11:30:55.176424 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8128758e-04b7-4c2c-abdc-c8c024262381-config-data\") pod \"nova-metadata-0\" (UID: \"8128758e-04b7-4c2c-abdc-c8c024262381\") " pod="openstack/nova-metadata-0" Nov 28 11:30:55 crc kubenswrapper[4923]: I1128 11:30:55.178636 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8128758e-04b7-4c2c-abdc-c8c024262381-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"8128758e-04b7-4c2c-abdc-c8c024262381\") " pod="openstack/nova-metadata-0" Nov 28 11:30:55 crc kubenswrapper[4923]: I1128 11:30:55.179570 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57d1e985-8768-421e-910e-a65c632dd5d3" path="/var/lib/kubelet/pods/57d1e985-8768-421e-910e-a65c632dd5d3/volumes" Nov 28 11:30:55 crc kubenswrapper[4923]: I1128 11:30:55.180144 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e122279d-7bf5-41e7-b4c7-6c2edbdbe508" path="/var/lib/kubelet/pods/e122279d-7bf5-41e7-b4c7-6c2edbdbe508/volumes" Nov 28 11:30:55 crc kubenswrapper[4923]: I1128 11:30:55.189028 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/8128758e-04b7-4c2c-abdc-c8c024262381-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"8128758e-04b7-4c2c-abdc-c8c024262381\") " pod="openstack/nova-metadata-0" Nov 28 11:30:55 crc kubenswrapper[4923]: I1128 11:30:55.194912 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2xgcd\" (UniqueName: \"kubernetes.io/projected/8128758e-04b7-4c2c-abdc-c8c024262381-kube-api-access-2xgcd\") pod \"nova-metadata-0\" (UID: \"8128758e-04b7-4c2c-abdc-c8c024262381\") " pod="openstack/nova-metadata-0" Nov 28 11:30:55 crc kubenswrapper[4923]: I1128 11:30:55.250671 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 11:30:55 crc kubenswrapper[4923]: I1128 11:30:55.738260 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 11:30:55 crc kubenswrapper[4923]: W1128 11:30:55.769233 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8128758e_04b7_4c2c_abdc_c8c024262381.slice/crio-6781e0a19ecb8a4878debe9689ac8d2b480fda0e94e3e48d3ca6b8030d413735 WatchSource:0}: Error finding container 6781e0a19ecb8a4878debe9689ac8d2b480fda0e94e3e48d3ca6b8030d413735: Status 404 returned error can't find the container with id 6781e0a19ecb8a4878debe9689ac8d2b480fda0e94e3e48d3ca6b8030d413735 Nov 28 11:30:55 crc kubenswrapper[4923]: I1128 11:30:55.837570 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"9ae9e9bf-102d-4abe-8adc-6720d53d5ebf","Type":"ContainerStarted","Data":"303bd9afe18d73def89b95371ba4f38ad3e8cee832480f927b71a0c029c061e9"} Nov 28 11:30:55 crc kubenswrapper[4923]: I1128 11:30:55.837614 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"9ae9e9bf-102d-4abe-8adc-6720d53d5ebf","Type":"ContainerStarted","Data":"bffe745b73a2108f05152869768eef756a0e7fe3e758d75a71238ddd4141f793"} Nov 28 11:30:55 crc kubenswrapper[4923]: I1128 11:30:55.849130 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"8128758e-04b7-4c2c-abdc-c8c024262381","Type":"ContainerStarted","Data":"6781e0a19ecb8a4878debe9689ac8d2b480fda0e94e3e48d3ca6b8030d413735"} Nov 28 11:30:55 crc kubenswrapper[4923]: I1128 11:30:55.864853 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=1.864836955 podStartE2EDuration="1.864836955s" podCreationTimestamp="2025-11-28 11:30:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:30:55.857132669 +0000 UTC m=+1334.985816889" watchObservedRunningTime="2025-11-28 11:30:55.864836955 +0000 UTC m=+1334.993521165" Nov 28 11:30:56 crc kubenswrapper[4923]: I1128 11:30:56.544472 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 28 11:30:56 crc kubenswrapper[4923]: I1128 11:30:56.707184 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d836f6d9-f12f-4fd2-b84d-3ace0ffa8463-config-data\") pod \"d836f6d9-f12f-4fd2-b84d-3ace0ffa8463\" (UID: \"d836f6d9-f12f-4fd2-b84d-3ace0ffa8463\") " Nov 28 11:30:56 crc kubenswrapper[4923]: I1128 11:30:56.707436 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d836f6d9-f12f-4fd2-b84d-3ace0ffa8463-internal-tls-certs\") pod \"d836f6d9-f12f-4fd2-b84d-3ace0ffa8463\" (UID: \"d836f6d9-f12f-4fd2-b84d-3ace0ffa8463\") " Nov 28 11:30:56 crc kubenswrapper[4923]: I1128 11:30:56.708115 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d836f6d9-f12f-4fd2-b84d-3ace0ffa8463-combined-ca-bundle\") pod \"d836f6d9-f12f-4fd2-b84d-3ace0ffa8463\" (UID: \"d836f6d9-f12f-4fd2-b84d-3ace0ffa8463\") " Nov 28 11:30:56 crc kubenswrapper[4923]: I1128 11:30:56.708416 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d836f6d9-f12f-4fd2-b84d-3ace0ffa8463-logs\") pod \"d836f6d9-f12f-4fd2-b84d-3ace0ffa8463\" (UID: \"d836f6d9-f12f-4fd2-b84d-3ace0ffa8463\") " Nov 28 11:30:56 crc kubenswrapper[4923]: I1128 11:30:56.708539 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d836f6d9-f12f-4fd2-b84d-3ace0ffa8463-public-tls-certs\") pod \"d836f6d9-f12f-4fd2-b84d-3ace0ffa8463\" (UID: \"d836f6d9-f12f-4fd2-b84d-3ace0ffa8463\") " Nov 28 11:30:56 crc kubenswrapper[4923]: I1128 11:30:56.708608 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-66j98\" (UniqueName: \"kubernetes.io/projected/d836f6d9-f12f-4fd2-b84d-3ace0ffa8463-kube-api-access-66j98\") pod \"d836f6d9-f12f-4fd2-b84d-3ace0ffa8463\" (UID: \"d836f6d9-f12f-4fd2-b84d-3ace0ffa8463\") " Nov 28 11:30:56 crc kubenswrapper[4923]: I1128 11:30:56.709474 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d836f6d9-f12f-4fd2-b84d-3ace0ffa8463-logs" (OuterVolumeSpecName: "logs") pod "d836f6d9-f12f-4fd2-b84d-3ace0ffa8463" (UID: "d836f6d9-f12f-4fd2-b84d-3ace0ffa8463"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:30:56 crc kubenswrapper[4923]: I1128 11:30:56.714315 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d836f6d9-f12f-4fd2-b84d-3ace0ffa8463-kube-api-access-66j98" (OuterVolumeSpecName: "kube-api-access-66j98") pod "d836f6d9-f12f-4fd2-b84d-3ace0ffa8463" (UID: "d836f6d9-f12f-4fd2-b84d-3ace0ffa8463"). InnerVolumeSpecName "kube-api-access-66j98". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:30:56 crc kubenswrapper[4923]: I1128 11:30:56.740148 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d836f6d9-f12f-4fd2-b84d-3ace0ffa8463-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d836f6d9-f12f-4fd2-b84d-3ace0ffa8463" (UID: "d836f6d9-f12f-4fd2-b84d-3ace0ffa8463"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:30:56 crc kubenswrapper[4923]: I1128 11:30:56.790724 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d836f6d9-f12f-4fd2-b84d-3ace0ffa8463-config-data" (OuterVolumeSpecName: "config-data") pod "d836f6d9-f12f-4fd2-b84d-3ace0ffa8463" (UID: "d836f6d9-f12f-4fd2-b84d-3ace0ffa8463"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:30:56 crc kubenswrapper[4923]: I1128 11:30:56.791509 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d836f6d9-f12f-4fd2-b84d-3ace0ffa8463-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "d836f6d9-f12f-4fd2-b84d-3ace0ffa8463" (UID: "d836f6d9-f12f-4fd2-b84d-3ace0ffa8463"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:30:56 crc kubenswrapper[4923]: I1128 11:30:56.798599 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d836f6d9-f12f-4fd2-b84d-3ace0ffa8463-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "d836f6d9-f12f-4fd2-b84d-3ace0ffa8463" (UID: "d836f6d9-f12f-4fd2-b84d-3ace0ffa8463"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:30:56 crc kubenswrapper[4923]: I1128 11:30:56.810955 4923 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d836f6d9-f12f-4fd2-b84d-3ace0ffa8463-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 11:30:56 crc kubenswrapper[4923]: I1128 11:30:56.810982 4923 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d836f6d9-f12f-4fd2-b84d-3ace0ffa8463-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 11:30:56 crc kubenswrapper[4923]: I1128 11:30:56.810993 4923 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d836f6d9-f12f-4fd2-b84d-3ace0ffa8463-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 11:30:56 crc kubenswrapper[4923]: I1128 11:30:56.811002 4923 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d836f6d9-f12f-4fd2-b84d-3ace0ffa8463-logs\") on node \"crc\" DevicePath \"\"" Nov 28 11:30:56 crc kubenswrapper[4923]: I1128 11:30:56.811010 4923 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d836f6d9-f12f-4fd2-b84d-3ace0ffa8463-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 11:30:56 crc kubenswrapper[4923]: I1128 11:30:56.811018 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-66j98\" (UniqueName: \"kubernetes.io/projected/d836f6d9-f12f-4fd2-b84d-3ace0ffa8463-kube-api-access-66j98\") on node \"crc\" DevicePath \"\"" Nov 28 11:30:56 crc kubenswrapper[4923]: I1128 11:30:56.861815 4923 generic.go:334] "Generic (PLEG): container finished" podID="d836f6d9-f12f-4fd2-b84d-3ace0ffa8463" containerID="a7cba8f96f099c3af89230827f8db9f68878b8e3171ce10cfd9945bc3e0be8c6" exitCode=0 Nov 28 11:30:56 crc kubenswrapper[4923]: I1128 11:30:56.861867 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 28 11:30:56 crc kubenswrapper[4923]: I1128 11:30:56.861893 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"d836f6d9-f12f-4fd2-b84d-3ace0ffa8463","Type":"ContainerDied","Data":"a7cba8f96f099c3af89230827f8db9f68878b8e3171ce10cfd9945bc3e0be8c6"} Nov 28 11:30:56 crc kubenswrapper[4923]: I1128 11:30:56.861919 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"d836f6d9-f12f-4fd2-b84d-3ace0ffa8463","Type":"ContainerDied","Data":"ed7d390949cac379049aa814e45535885879f26c28b6660e89564aa775fbf798"} Nov 28 11:30:56 crc kubenswrapper[4923]: I1128 11:30:56.861954 4923 scope.go:117] "RemoveContainer" containerID="a7cba8f96f099c3af89230827f8db9f68878b8e3171ce10cfd9945bc3e0be8c6" Nov 28 11:30:56 crc kubenswrapper[4923]: I1128 11:30:56.881616 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"8128758e-04b7-4c2c-abdc-c8c024262381","Type":"ContainerStarted","Data":"0513486ca72bf698694b157b3828dfe860d7af21d197f55f222a19f5d76160ae"} Nov 28 11:30:56 crc kubenswrapper[4923]: I1128 11:30:56.881643 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"8128758e-04b7-4c2c-abdc-c8c024262381","Type":"ContainerStarted","Data":"ae0111ff9556c082d81a73bb8a9a0531f201ead2cb040c1502b35ccc6e38cdfe"} Nov 28 11:30:56 crc kubenswrapper[4923]: I1128 11:30:56.906322 4923 scope.go:117] "RemoveContainer" containerID="33a42edcf28b8e3567051f03ea9a0c4b0ab5953978066fab25862928e8a5921e" Nov 28 11:30:56 crc kubenswrapper[4923]: I1128 11:30:56.911021 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.911002672 podStartE2EDuration="2.911002672s" podCreationTimestamp="2025-11-28 11:30:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:30:56.906441294 +0000 UTC m=+1336.035125504" watchObservedRunningTime="2025-11-28 11:30:56.911002672 +0000 UTC m=+1336.039686882" Nov 28 11:30:56 crc kubenswrapper[4923]: I1128 11:30:56.926760 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 28 11:30:56 crc kubenswrapper[4923]: I1128 11:30:56.928468 4923 scope.go:117] "RemoveContainer" containerID="a7cba8f96f099c3af89230827f8db9f68878b8e3171ce10cfd9945bc3e0be8c6" Nov 28 11:30:56 crc kubenswrapper[4923]: E1128 11:30:56.929008 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a7cba8f96f099c3af89230827f8db9f68878b8e3171ce10cfd9945bc3e0be8c6\": container with ID starting with a7cba8f96f099c3af89230827f8db9f68878b8e3171ce10cfd9945bc3e0be8c6 not found: ID does not exist" containerID="a7cba8f96f099c3af89230827f8db9f68878b8e3171ce10cfd9945bc3e0be8c6" Nov 28 11:30:56 crc kubenswrapper[4923]: I1128 11:30:56.929041 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a7cba8f96f099c3af89230827f8db9f68878b8e3171ce10cfd9945bc3e0be8c6"} err="failed to get container status \"a7cba8f96f099c3af89230827f8db9f68878b8e3171ce10cfd9945bc3e0be8c6\": rpc error: code = NotFound desc = could not find container \"a7cba8f96f099c3af89230827f8db9f68878b8e3171ce10cfd9945bc3e0be8c6\": container with ID starting with a7cba8f96f099c3af89230827f8db9f68878b8e3171ce10cfd9945bc3e0be8c6 not found: ID does not exist" Nov 28 
11:30:56 crc kubenswrapper[4923]: I1128 11:30:56.929066 4923 scope.go:117] "RemoveContainer" containerID="33a42edcf28b8e3567051f03ea9a0c4b0ab5953978066fab25862928e8a5921e" Nov 28 11:30:56 crc kubenswrapper[4923]: E1128 11:30:56.929493 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"33a42edcf28b8e3567051f03ea9a0c4b0ab5953978066fab25862928e8a5921e\": container with ID starting with 33a42edcf28b8e3567051f03ea9a0c4b0ab5953978066fab25862928e8a5921e not found: ID does not exist" containerID="33a42edcf28b8e3567051f03ea9a0c4b0ab5953978066fab25862928e8a5921e" Nov 28 11:30:56 crc kubenswrapper[4923]: I1128 11:30:56.929528 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"33a42edcf28b8e3567051f03ea9a0c4b0ab5953978066fab25862928e8a5921e"} err="failed to get container status \"33a42edcf28b8e3567051f03ea9a0c4b0ab5953978066fab25862928e8a5921e\": rpc error: code = NotFound desc = could not find container \"33a42edcf28b8e3567051f03ea9a0c4b0ab5953978066fab25862928e8a5921e\": container with ID starting with 33a42edcf28b8e3567051f03ea9a0c4b0ab5953978066fab25862928e8a5921e not found: ID does not exist" Nov 28 11:30:56 crc kubenswrapper[4923]: I1128 11:30:56.955555 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 28 11:30:56 crc kubenswrapper[4923]: I1128 11:30:56.994415 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 28 11:30:56 crc kubenswrapper[4923]: E1128 11:30:56.995044 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d836f6d9-f12f-4fd2-b84d-3ace0ffa8463" containerName="nova-api-api" Nov 28 11:30:56 crc kubenswrapper[4923]: I1128 11:30:56.995062 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="d836f6d9-f12f-4fd2-b84d-3ace0ffa8463" containerName="nova-api-api" Nov 28 11:30:56 crc kubenswrapper[4923]: E1128 11:30:56.995094 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d836f6d9-f12f-4fd2-b84d-3ace0ffa8463" containerName="nova-api-log" Nov 28 11:30:56 crc kubenswrapper[4923]: I1128 11:30:56.995102 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="d836f6d9-f12f-4fd2-b84d-3ace0ffa8463" containerName="nova-api-log" Nov 28 11:30:56 crc kubenswrapper[4923]: I1128 11:30:56.995385 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="d836f6d9-f12f-4fd2-b84d-3ace0ffa8463" containerName="nova-api-log" Nov 28 11:30:56 crc kubenswrapper[4923]: I1128 11:30:56.995410 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="d836f6d9-f12f-4fd2-b84d-3ace0ffa8463" containerName="nova-api-api" Nov 28 11:30:56 crc kubenswrapper[4923]: I1128 11:30:56.996702 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 28 11:30:56 crc kubenswrapper[4923]: I1128 11:30:56.999618 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Nov 28 11:30:57 crc kubenswrapper[4923]: I1128 11:30:57.000538 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Nov 28 11:30:57 crc kubenswrapper[4923]: I1128 11:30:57.000772 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 28 11:30:57 crc kubenswrapper[4923]: I1128 11:30:57.016501 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 11:30:57 crc kubenswrapper[4923]: I1128 11:30:57.123239 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/89a1a42c-f7a9-448a-87aa-8ea85a021f46-public-tls-certs\") pod \"nova-api-0\" (UID: \"89a1a42c-f7a9-448a-87aa-8ea85a021f46\") " pod="openstack/nova-api-0" Nov 28 11:30:57 crc kubenswrapper[4923]: I1128 11:30:57.123448 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/89a1a42c-f7a9-448a-87aa-8ea85a021f46-internal-tls-certs\") pod \"nova-api-0\" (UID: \"89a1a42c-f7a9-448a-87aa-8ea85a021f46\") " pod="openstack/nova-api-0" Nov 28 11:30:57 crc kubenswrapper[4923]: I1128 11:30:57.123559 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/89a1a42c-f7a9-448a-87aa-8ea85a021f46-config-data\") pod \"nova-api-0\" (UID: \"89a1a42c-f7a9-448a-87aa-8ea85a021f46\") " pod="openstack/nova-api-0" Nov 28 11:30:57 crc kubenswrapper[4923]: I1128 11:30:57.123604 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/89a1a42c-f7a9-448a-87aa-8ea85a021f46-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"89a1a42c-f7a9-448a-87aa-8ea85a021f46\") " pod="openstack/nova-api-0" Nov 28 11:30:57 crc kubenswrapper[4923]: I1128 11:30:57.123646 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bcrpz\" (UniqueName: \"kubernetes.io/projected/89a1a42c-f7a9-448a-87aa-8ea85a021f46-kube-api-access-bcrpz\") pod \"nova-api-0\" (UID: \"89a1a42c-f7a9-448a-87aa-8ea85a021f46\") " pod="openstack/nova-api-0" Nov 28 11:30:57 crc kubenswrapper[4923]: I1128 11:30:57.123819 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/89a1a42c-f7a9-448a-87aa-8ea85a021f46-logs\") pod \"nova-api-0\" (UID: \"89a1a42c-f7a9-448a-87aa-8ea85a021f46\") " pod="openstack/nova-api-0" Nov 28 11:30:57 crc kubenswrapper[4923]: I1128 11:30:57.179889 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d836f6d9-f12f-4fd2-b84d-3ace0ffa8463" path="/var/lib/kubelet/pods/d836f6d9-f12f-4fd2-b84d-3ace0ffa8463/volumes" Nov 28 11:30:57 crc kubenswrapper[4923]: I1128 11:30:57.226075 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/89a1a42c-f7a9-448a-87aa-8ea85a021f46-logs\") pod \"nova-api-0\" (UID: \"89a1a42c-f7a9-448a-87aa-8ea85a021f46\") " pod="openstack/nova-api-0" Nov 28 11:30:57 crc kubenswrapper[4923]: I1128 
11:30:57.226138 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/89a1a42c-f7a9-448a-87aa-8ea85a021f46-public-tls-certs\") pod \"nova-api-0\" (UID: \"89a1a42c-f7a9-448a-87aa-8ea85a021f46\") " pod="openstack/nova-api-0" Nov 28 11:30:57 crc kubenswrapper[4923]: I1128 11:30:57.226202 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/89a1a42c-f7a9-448a-87aa-8ea85a021f46-internal-tls-certs\") pod \"nova-api-0\" (UID: \"89a1a42c-f7a9-448a-87aa-8ea85a021f46\") " pod="openstack/nova-api-0" Nov 28 11:30:57 crc kubenswrapper[4923]: I1128 11:30:57.226243 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/89a1a42c-f7a9-448a-87aa-8ea85a021f46-config-data\") pod \"nova-api-0\" (UID: \"89a1a42c-f7a9-448a-87aa-8ea85a021f46\") " pod="openstack/nova-api-0" Nov 28 11:30:57 crc kubenswrapper[4923]: I1128 11:30:57.226270 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/89a1a42c-f7a9-448a-87aa-8ea85a021f46-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"89a1a42c-f7a9-448a-87aa-8ea85a021f46\") " pod="openstack/nova-api-0" Nov 28 11:30:57 crc kubenswrapper[4923]: I1128 11:30:57.226285 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bcrpz\" (UniqueName: \"kubernetes.io/projected/89a1a42c-f7a9-448a-87aa-8ea85a021f46-kube-api-access-bcrpz\") pod \"nova-api-0\" (UID: \"89a1a42c-f7a9-448a-87aa-8ea85a021f46\") " pod="openstack/nova-api-0" Nov 28 11:30:57 crc kubenswrapper[4923]: I1128 11:30:57.227084 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/89a1a42c-f7a9-448a-87aa-8ea85a021f46-logs\") pod \"nova-api-0\" (UID: \"89a1a42c-f7a9-448a-87aa-8ea85a021f46\") " pod="openstack/nova-api-0" Nov 28 11:30:57 crc kubenswrapper[4923]: I1128 11:30:57.231843 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/89a1a42c-f7a9-448a-87aa-8ea85a021f46-public-tls-certs\") pod \"nova-api-0\" (UID: \"89a1a42c-f7a9-448a-87aa-8ea85a021f46\") " pod="openstack/nova-api-0" Nov 28 11:30:57 crc kubenswrapper[4923]: I1128 11:30:57.232334 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/89a1a42c-f7a9-448a-87aa-8ea85a021f46-config-data\") pod \"nova-api-0\" (UID: \"89a1a42c-f7a9-448a-87aa-8ea85a021f46\") " pod="openstack/nova-api-0" Nov 28 11:30:57 crc kubenswrapper[4923]: I1128 11:30:57.236993 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/89a1a42c-f7a9-448a-87aa-8ea85a021f46-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"89a1a42c-f7a9-448a-87aa-8ea85a021f46\") " pod="openstack/nova-api-0" Nov 28 11:30:57 crc kubenswrapper[4923]: I1128 11:30:57.237826 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/89a1a42c-f7a9-448a-87aa-8ea85a021f46-internal-tls-certs\") pod \"nova-api-0\" (UID: \"89a1a42c-f7a9-448a-87aa-8ea85a021f46\") " pod="openstack/nova-api-0" Nov 28 11:30:57 crc kubenswrapper[4923]: I1128 11:30:57.244350 4923 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-bcrpz\" (UniqueName: \"kubernetes.io/projected/89a1a42c-f7a9-448a-87aa-8ea85a021f46-kube-api-access-bcrpz\") pod \"nova-api-0\" (UID: \"89a1a42c-f7a9-448a-87aa-8ea85a021f46\") " pod="openstack/nova-api-0" Nov 28 11:30:57 crc kubenswrapper[4923]: I1128 11:30:57.321548 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 11:30:57 crc kubenswrapper[4923]: I1128 11:30:57.794157 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 11:30:57 crc kubenswrapper[4923]: W1128 11:30:57.797063 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod89a1a42c_f7a9_448a_87aa_8ea85a021f46.slice/crio-4c3584a146989e483a875a4220a0aa095dce8b91b15ac7487a3cfb3393c1c309 WatchSource:0}: Error finding container 4c3584a146989e483a875a4220a0aa095dce8b91b15ac7487a3cfb3393c1c309: Status 404 returned error can't find the container with id 4c3584a146989e483a875a4220a0aa095dce8b91b15ac7487a3cfb3393c1c309 Nov 28 11:30:57 crc kubenswrapper[4923]: I1128 11:30:57.897338 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"89a1a42c-f7a9-448a-87aa-8ea85a021f46","Type":"ContainerStarted","Data":"4c3584a146989e483a875a4220a0aa095dce8b91b15ac7487a3cfb3393c1c309"} Nov 28 11:30:58 crc kubenswrapper[4923]: I1128 11:30:58.907659 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"89a1a42c-f7a9-448a-87aa-8ea85a021f46","Type":"ContainerStarted","Data":"0f506b0be33f3e307154f8d43a9f15410d7e67023ccbf45e3bb083b15ca79910"} Nov 28 11:30:58 crc kubenswrapper[4923]: I1128 11:30:58.908029 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"89a1a42c-f7a9-448a-87aa-8ea85a021f46","Type":"ContainerStarted","Data":"9008326044be320034cd8211c66c3e9d31d014d1b6017c77069bd329b7ac51f7"} Nov 28 11:30:59 crc kubenswrapper[4923]: I1128 11:30:59.562609 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 28 11:31:00 crc kubenswrapper[4923]: I1128 11:31:00.251189 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 28 11:31:00 crc kubenswrapper[4923]: I1128 11:31:00.251244 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 28 11:31:04 crc kubenswrapper[4923]: I1128 11:31:04.563706 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 28 11:31:04 crc kubenswrapper[4923]: I1128 11:31:04.589074 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 28 11:31:04 crc kubenswrapper[4923]: I1128 11:31:04.634618 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=8.634595813 podStartE2EDuration="8.634595813s" podCreationTimestamp="2025-11-28 11:30:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:30:58.928451821 +0000 UTC m=+1338.057136061" watchObservedRunningTime="2025-11-28 11:31:04.634595813 +0000 UTC m=+1343.763280033" Nov 28 11:31:05 crc kubenswrapper[4923]: I1128 11:31:05.035765 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack/nova-scheduler-0" Nov 28 11:31:05 crc kubenswrapper[4923]: I1128 11:31:05.250990 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 28 11:31:05 crc kubenswrapper[4923]: I1128 11:31:05.251069 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 28 11:31:06 crc kubenswrapper[4923]: I1128 11:31:06.267255 4923 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="8128758e-04b7-4c2c-abdc-c8c024262381" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.186:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 28 11:31:06 crc kubenswrapper[4923]: I1128 11:31:06.267272 4923 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="8128758e-04b7-4c2c-abdc-c8c024262381" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.186:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 28 11:31:07 crc kubenswrapper[4923]: I1128 11:31:07.322055 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 28 11:31:07 crc kubenswrapper[4923]: I1128 11:31:07.322101 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 28 11:31:08 crc kubenswrapper[4923]: I1128 11:31:08.338108 4923 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="89a1a42c-f7a9-448a-87aa-8ea85a021f46" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.187:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 28 11:31:08 crc kubenswrapper[4923]: I1128 11:31:08.338156 4923 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="89a1a42c-f7a9-448a-87aa-8ea85a021f46" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.187:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 28 11:31:11 crc kubenswrapper[4923]: I1128 11:31:11.041947 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 28 11:31:14 crc kubenswrapper[4923]: I1128 11:31:14.026744 4923 patch_prober.go:28] interesting pod/machine-config-daemon-bwdth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 11:31:14 crc kubenswrapper[4923]: I1128 11:31:14.027134 4923 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 11:31:15 crc kubenswrapper[4923]: I1128 11:31:15.260847 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 28 11:31:15 crc kubenswrapper[4923]: I1128 11:31:15.262067 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 28 11:31:15 crc kubenswrapper[4923]: I1128 11:31:15.270882 4923 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 28 11:31:15 crc kubenswrapper[4923]: I1128 11:31:15.271551 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 28 11:31:17 crc kubenswrapper[4923]: I1128 11:31:17.332827 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 28 11:31:17 crc kubenswrapper[4923]: I1128 11:31:17.335006 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 28 11:31:17 crc kubenswrapper[4923]: I1128 11:31:17.341399 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 28 11:31:17 crc kubenswrapper[4923]: I1128 11:31:17.347379 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 28 11:31:18 crc kubenswrapper[4923]: I1128 11:31:18.146484 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 28 11:31:18 crc kubenswrapper[4923]: I1128 11:31:18.345674 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 28 11:31:26 crc kubenswrapper[4923]: I1128 11:31:26.661570 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 28 11:31:28 crc kubenswrapper[4923]: I1128 11:31:28.145025 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 28 11:31:31 crc kubenswrapper[4923]: I1128 11:31:31.197783 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="26a4b167-a30a-4655-80aa-2177fe14784c" containerName="rabbitmq" containerID="cri-o://15ed96e1e9c6a52ee3fa69e21deee536fed7f72518ec9117db061e51c643648e" gracePeriod=604796 Nov 28 11:31:32 crc kubenswrapper[4923]: I1128 11:31:32.572854 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="456d70c2-443b-455b-83fe-fc87e36534ac" containerName="rabbitmq" containerID="cri-o://0adef79547a29cf58c840ff6fe32e02579298f45c778018b82629cb1f6d2e4e8" gracePeriod=604796 Nov 28 11:31:37 crc kubenswrapper[4923]: I1128 11:31:37.493328 4923 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="456d70c2-443b-455b-83fe-fc87e36534ac" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.98:5671: connect: connection refused" Nov 28 11:31:37 crc kubenswrapper[4923]: I1128 11:31:37.724876 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 28 11:31:37 crc kubenswrapper[4923]: I1128 11:31:37.854283 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/26a4b167-a30a-4655-80aa-2177fe14784c-rabbitmq-confd\") pod \"26a4b167-a30a-4655-80aa-2177fe14784c\" (UID: \"26a4b167-a30a-4655-80aa-2177fe14784c\") " Nov 28 11:31:37 crc kubenswrapper[4923]: I1128 11:31:37.854328 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/26a4b167-a30a-4655-80aa-2177fe14784c-rabbitmq-erlang-cookie\") pod \"26a4b167-a30a-4655-80aa-2177fe14784c\" (UID: \"26a4b167-a30a-4655-80aa-2177fe14784c\") " Nov 28 11:31:37 crc kubenswrapper[4923]: I1128 11:31:37.854415 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/26a4b167-a30a-4655-80aa-2177fe14784c-server-conf\") pod \"26a4b167-a30a-4655-80aa-2177fe14784c\" (UID: \"26a4b167-a30a-4655-80aa-2177fe14784c\") " Nov 28 11:31:37 crc kubenswrapper[4923]: I1128 11:31:37.854444 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"26a4b167-a30a-4655-80aa-2177fe14784c\" (UID: \"26a4b167-a30a-4655-80aa-2177fe14784c\") " Nov 28 11:31:37 crc kubenswrapper[4923]: I1128 11:31:37.854461 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/26a4b167-a30a-4655-80aa-2177fe14784c-erlang-cookie-secret\") pod \"26a4b167-a30a-4655-80aa-2177fe14784c\" (UID: \"26a4b167-a30a-4655-80aa-2177fe14784c\") " Nov 28 11:31:37 crc kubenswrapper[4923]: I1128 11:31:37.854494 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xkpzp\" (UniqueName: \"kubernetes.io/projected/26a4b167-a30a-4655-80aa-2177fe14784c-kube-api-access-xkpzp\") pod \"26a4b167-a30a-4655-80aa-2177fe14784c\" (UID: \"26a4b167-a30a-4655-80aa-2177fe14784c\") " Nov 28 11:31:37 crc kubenswrapper[4923]: I1128 11:31:37.854533 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/26a4b167-a30a-4655-80aa-2177fe14784c-rabbitmq-tls\") pod \"26a4b167-a30a-4655-80aa-2177fe14784c\" (UID: \"26a4b167-a30a-4655-80aa-2177fe14784c\") " Nov 28 11:31:37 crc kubenswrapper[4923]: I1128 11:31:37.854603 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/26a4b167-a30a-4655-80aa-2177fe14784c-pod-info\") pod \"26a4b167-a30a-4655-80aa-2177fe14784c\" (UID: \"26a4b167-a30a-4655-80aa-2177fe14784c\") " Nov 28 11:31:37 crc kubenswrapper[4923]: I1128 11:31:37.854626 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/26a4b167-a30a-4655-80aa-2177fe14784c-config-data\") pod \"26a4b167-a30a-4655-80aa-2177fe14784c\" (UID: \"26a4b167-a30a-4655-80aa-2177fe14784c\") " Nov 28 11:31:37 crc kubenswrapper[4923]: I1128 11:31:37.854644 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/26a4b167-a30a-4655-80aa-2177fe14784c-rabbitmq-plugins\") pod \"26a4b167-a30a-4655-80aa-2177fe14784c\" (UID: 
\"26a4b167-a30a-4655-80aa-2177fe14784c\") " Nov 28 11:31:37 crc kubenswrapper[4923]: I1128 11:31:37.854670 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/26a4b167-a30a-4655-80aa-2177fe14784c-plugins-conf\") pod \"26a4b167-a30a-4655-80aa-2177fe14784c\" (UID: \"26a4b167-a30a-4655-80aa-2177fe14784c\") " Nov 28 11:31:37 crc kubenswrapper[4923]: I1128 11:31:37.855588 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/26a4b167-a30a-4655-80aa-2177fe14784c-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "26a4b167-a30a-4655-80aa-2177fe14784c" (UID: "26a4b167-a30a-4655-80aa-2177fe14784c"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:31:37 crc kubenswrapper[4923]: I1128 11:31:37.859308 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/26a4b167-a30a-4655-80aa-2177fe14784c-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "26a4b167-a30a-4655-80aa-2177fe14784c" (UID: "26a4b167-a30a-4655-80aa-2177fe14784c"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:31:37 crc kubenswrapper[4923]: I1128 11:31:37.863175 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/26a4b167-a30a-4655-80aa-2177fe14784c-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "26a4b167-a30a-4655-80aa-2177fe14784c" (UID: "26a4b167-a30a-4655-80aa-2177fe14784c"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:31:37 crc kubenswrapper[4923]: I1128 11:31:37.867345 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/26a4b167-a30a-4655-80aa-2177fe14784c-kube-api-access-xkpzp" (OuterVolumeSpecName: "kube-api-access-xkpzp") pod "26a4b167-a30a-4655-80aa-2177fe14784c" (UID: "26a4b167-a30a-4655-80aa-2177fe14784c"). InnerVolumeSpecName "kube-api-access-xkpzp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:31:37 crc kubenswrapper[4923]: I1128 11:31:37.867418 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/26a4b167-a30a-4655-80aa-2177fe14784c-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "26a4b167-a30a-4655-80aa-2177fe14784c" (UID: "26a4b167-a30a-4655-80aa-2177fe14784c"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:31:37 crc kubenswrapper[4923]: I1128 11:31:37.887257 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/26a4b167-a30a-4655-80aa-2177fe14784c-pod-info" (OuterVolumeSpecName: "pod-info") pod "26a4b167-a30a-4655-80aa-2177fe14784c" (UID: "26a4b167-a30a-4655-80aa-2177fe14784c"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Nov 28 11:31:37 crc kubenswrapper[4923]: I1128 11:31:37.887279 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/26a4b167-a30a-4655-80aa-2177fe14784c-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "26a4b167-a30a-4655-80aa-2177fe14784c" (UID: "26a4b167-a30a-4655-80aa-2177fe14784c"). InnerVolumeSpecName "rabbitmq-tls". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:31:37 crc kubenswrapper[4923]: I1128 11:31:37.899139 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage03-crc" (OuterVolumeSpecName: "persistence") pod "26a4b167-a30a-4655-80aa-2177fe14784c" (UID: "26a4b167-a30a-4655-80aa-2177fe14784c"). InnerVolumeSpecName "local-storage03-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 28 11:31:37 crc kubenswrapper[4923]: I1128 11:31:37.931819 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/26a4b167-a30a-4655-80aa-2177fe14784c-config-data" (OuterVolumeSpecName: "config-data") pod "26a4b167-a30a-4655-80aa-2177fe14784c" (UID: "26a4b167-a30a-4655-80aa-2177fe14784c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:31:37 crc kubenswrapper[4923]: I1128 11:31:37.963983 4923 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/26a4b167-a30a-4655-80aa-2177fe14784c-pod-info\") on node \"crc\" DevicePath \"\"" Nov 28 11:31:37 crc kubenswrapper[4923]: I1128 11:31:37.964234 4923 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/26a4b167-a30a-4655-80aa-2177fe14784c-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 11:31:37 crc kubenswrapper[4923]: I1128 11:31:37.964245 4923 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/26a4b167-a30a-4655-80aa-2177fe14784c-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Nov 28 11:31:37 crc kubenswrapper[4923]: I1128 11:31:37.964254 4923 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/26a4b167-a30a-4655-80aa-2177fe14784c-plugins-conf\") on node \"crc\" DevicePath \"\"" Nov 28 11:31:37 crc kubenswrapper[4923]: I1128 11:31:37.964264 4923 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/26a4b167-a30a-4655-80aa-2177fe14784c-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Nov 28 11:31:37 crc kubenswrapper[4923]: I1128 11:31:37.964286 4923 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" " Nov 28 11:31:37 crc kubenswrapper[4923]: I1128 11:31:37.964295 4923 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/26a4b167-a30a-4655-80aa-2177fe14784c-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Nov 28 11:31:37 crc kubenswrapper[4923]: I1128 11:31:37.964303 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xkpzp\" (UniqueName: \"kubernetes.io/projected/26a4b167-a30a-4655-80aa-2177fe14784c-kube-api-access-xkpzp\") on node \"crc\" DevicePath \"\"" Nov 28 11:31:37 crc kubenswrapper[4923]: I1128 11:31:37.964312 4923 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/26a4b167-a30a-4655-80aa-2177fe14784c-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Nov 28 11:31:37 crc kubenswrapper[4923]: I1128 11:31:37.979109 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/26a4b167-a30a-4655-80aa-2177fe14784c-server-conf" 
(OuterVolumeSpecName: "server-conf") pod "26a4b167-a30a-4655-80aa-2177fe14784c" (UID: "26a4b167-a30a-4655-80aa-2177fe14784c"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:31:38 crc kubenswrapper[4923]: I1128 11:31:38.001237 4923 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage03-crc" (UniqueName: "kubernetes.io/local-volume/local-storage03-crc") on node "crc" Nov 28 11:31:38 crc kubenswrapper[4923]: I1128 11:31:38.016178 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/26a4b167-a30a-4655-80aa-2177fe14784c-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "26a4b167-a30a-4655-80aa-2177fe14784c" (UID: "26a4b167-a30a-4655-80aa-2177fe14784c"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:31:38 crc kubenswrapper[4923]: I1128 11:31:38.070952 4923 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/26a4b167-a30a-4655-80aa-2177fe14784c-server-conf\") on node \"crc\" DevicePath \"\"" Nov 28 11:31:38 crc kubenswrapper[4923]: I1128 11:31:38.070986 4923 reconciler_common.go:293] "Volume detached for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" DevicePath \"\"" Nov 28 11:31:38 crc kubenswrapper[4923]: I1128 11:31:38.070996 4923 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/26a4b167-a30a-4655-80aa-2177fe14784c-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Nov 28 11:31:38 crc kubenswrapper[4923]: I1128 11:31:38.336446 4923 generic.go:334] "Generic (PLEG): container finished" podID="26a4b167-a30a-4655-80aa-2177fe14784c" containerID="15ed96e1e9c6a52ee3fa69e21deee536fed7f72518ec9117db061e51c643648e" exitCode=0 Nov 28 11:31:38 crc kubenswrapper[4923]: I1128 11:31:38.336487 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"26a4b167-a30a-4655-80aa-2177fe14784c","Type":"ContainerDied","Data":"15ed96e1e9c6a52ee3fa69e21deee536fed7f72518ec9117db061e51c643648e"} Nov 28 11:31:38 crc kubenswrapper[4923]: I1128 11:31:38.336513 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"26a4b167-a30a-4655-80aa-2177fe14784c","Type":"ContainerDied","Data":"4e605008a9e35bfdf40121092457048041106e5fe3729ef178959cc3b538e626"} Nov 28 11:31:38 crc kubenswrapper[4923]: I1128 11:31:38.336520 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 28 11:31:38 crc kubenswrapper[4923]: I1128 11:31:38.336530 4923 scope.go:117] "RemoveContainer" containerID="15ed96e1e9c6a52ee3fa69e21deee536fed7f72518ec9117db061e51c643648e" Nov 28 11:31:38 crc kubenswrapper[4923]: I1128 11:31:38.358861 4923 scope.go:117] "RemoveContainer" containerID="877c6e8210bfbbb050a57173fa72769c5cca178fe72691fd5da642acdfd3f260" Nov 28 11:31:38 crc kubenswrapper[4923]: I1128 11:31:38.373415 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 28 11:31:38 crc kubenswrapper[4923]: I1128 11:31:38.397495 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 28 11:31:38 crc kubenswrapper[4923]: I1128 11:31:38.404221 4923 scope.go:117] "RemoveContainer" containerID="15ed96e1e9c6a52ee3fa69e21deee536fed7f72518ec9117db061e51c643648e" Nov 28 11:31:38 crc kubenswrapper[4923]: E1128 11:31:38.406342 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"15ed96e1e9c6a52ee3fa69e21deee536fed7f72518ec9117db061e51c643648e\": container with ID starting with 15ed96e1e9c6a52ee3fa69e21deee536fed7f72518ec9117db061e51c643648e not found: ID does not exist" containerID="15ed96e1e9c6a52ee3fa69e21deee536fed7f72518ec9117db061e51c643648e" Nov 28 11:31:38 crc kubenswrapper[4923]: I1128 11:31:38.406469 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"15ed96e1e9c6a52ee3fa69e21deee536fed7f72518ec9117db061e51c643648e"} err="failed to get container status \"15ed96e1e9c6a52ee3fa69e21deee536fed7f72518ec9117db061e51c643648e\": rpc error: code = NotFound desc = could not find container \"15ed96e1e9c6a52ee3fa69e21deee536fed7f72518ec9117db061e51c643648e\": container with ID starting with 15ed96e1e9c6a52ee3fa69e21deee536fed7f72518ec9117db061e51c643648e not found: ID does not exist" Nov 28 11:31:38 crc kubenswrapper[4923]: I1128 11:31:38.406563 4923 scope.go:117] "RemoveContainer" containerID="877c6e8210bfbbb050a57173fa72769c5cca178fe72691fd5da642acdfd3f260" Nov 28 11:31:38 crc kubenswrapper[4923]: E1128 11:31:38.407038 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"877c6e8210bfbbb050a57173fa72769c5cca178fe72691fd5da642acdfd3f260\": container with ID starting with 877c6e8210bfbbb050a57173fa72769c5cca178fe72691fd5da642acdfd3f260 not found: ID does not exist" containerID="877c6e8210bfbbb050a57173fa72769c5cca178fe72691fd5da642acdfd3f260" Nov 28 11:31:38 crc kubenswrapper[4923]: I1128 11:31:38.407084 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"877c6e8210bfbbb050a57173fa72769c5cca178fe72691fd5da642acdfd3f260"} err="failed to get container status \"877c6e8210bfbbb050a57173fa72769c5cca178fe72691fd5da642acdfd3f260\": rpc error: code = NotFound desc = could not find container \"877c6e8210bfbbb050a57173fa72769c5cca178fe72691fd5da642acdfd3f260\": container with ID starting with 877c6e8210bfbbb050a57173fa72769c5cca178fe72691fd5da642acdfd3f260 not found: ID does not exist" Nov 28 11:31:38 crc kubenswrapper[4923]: I1128 11:31:38.412398 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Nov 28 11:31:38 crc kubenswrapper[4923]: E1128 11:31:38.412838 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="26a4b167-a30a-4655-80aa-2177fe14784c" containerName="setup-container" 
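
The entries above show the usual teardown race for openstack/rabbitmq-server-0: the PLEG reports "ContainerDied" for containers 15ed96e1... and 877c6e82..., kubelet then calls RemoveContainer, and the follow-up ContainerStatus lookups fail with NotFound because CRI-O has already deleted both containers, so the E-level "DeleteContainer returned error" lines are benign. A minimal sketch of how to confirm that from a saved copy of this log (assumptions: the log is stored locally as "kubelet.log" with one journal entry per line, and the line formats match those above; the filename and regexes are illustrative, not part of kubelet):

#!/usr/bin/env python3
# Pair each PLEG "ContainerDied" event with any later
# "DeleteContainer returned error ... NotFound" line for the
# same 64-hex container ID.
import re
from collections import defaultdict

died_re = re.compile(r'"ContainerDied","Data":"([0-9a-f]{64})"')
notfound_re = re.compile(r'DeleteContainer returned error.*?"ID":"([0-9a-f]{64})"')

died, notfound = [], defaultdict(int)
with open("kubelet.log") as f:          # assumed local copy of this log
    for line in f:
        m = died_re.search(line)
        if m:
            died.append(m.group(1))
        m = notfound_re.search(line)
        if m:
            notfound[m.group(1)] += 1

for cid in died:
    status = "already removed (NotFound on delete)" if notfound.get(cid) else "removed cleanly"
    print(f"{cid[:12]}  {status}")

On this trace the two application containers print as "already removed (NotFound on delete)", while the pod sandbox ID (also reported via ContainerDied) prints as removed cleanly.
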
Nov 28 11:31:38 crc kubenswrapper[4923]: I1128 11:31:38.412858 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="26a4b167-a30a-4655-80aa-2177fe14784c" containerName="setup-container" Nov 28 11:31:38 crc kubenswrapper[4923]: E1128 11:31:38.412881 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="26a4b167-a30a-4655-80aa-2177fe14784c" containerName="rabbitmq" Nov 28 11:31:38 crc kubenswrapper[4923]: I1128 11:31:38.412890 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="26a4b167-a30a-4655-80aa-2177fe14784c" containerName="rabbitmq" Nov 28 11:31:38 crc kubenswrapper[4923]: I1128 11:31:38.413157 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="26a4b167-a30a-4655-80aa-2177fe14784c" containerName="rabbitmq" Nov 28 11:31:38 crc kubenswrapper[4923]: I1128 11:31:38.414385 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 28 11:31:38 crc kubenswrapper[4923]: I1128 11:31:38.449653 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Nov 28 11:31:38 crc kubenswrapper[4923]: I1128 11:31:38.449845 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Nov 28 11:31:38 crc kubenswrapper[4923]: I1128 11:31:38.449972 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Nov 28 11:31:38 crc kubenswrapper[4923]: I1128 11:31:38.450012 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Nov 28 11:31:38 crc kubenswrapper[4923]: I1128 11:31:38.449981 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Nov 28 11:31:38 crc kubenswrapper[4923]: I1128 11:31:38.450151 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Nov 28 11:31:38 crc kubenswrapper[4923]: I1128 11:31:38.450266 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-gslqj" Nov 28 11:31:38 crc kubenswrapper[4923]: I1128 11:31:38.458238 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 28 11:31:38 crc kubenswrapper[4923]: I1128 11:31:38.581467 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/d4b9d25a-9809-4c97-a1dd-37d779b158cf-server-conf\") pod \"rabbitmq-server-0\" (UID: \"d4b9d25a-9809-4c97-a1dd-37d779b158cf\") " pod="openstack/rabbitmq-server-0" Nov 28 11:31:38 crc kubenswrapper[4923]: I1128 11:31:38.582434 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-server-0\" (UID: \"d4b9d25a-9809-4c97-a1dd-37d779b158cf\") " pod="openstack/rabbitmq-server-0" Nov 28 11:31:38 crc kubenswrapper[4923]: I1128 11:31:38.582544 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/d4b9d25a-9809-4c97-a1dd-37d779b158cf-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"d4b9d25a-9809-4c97-a1dd-37d779b158cf\") " pod="openstack/rabbitmq-server-0" Nov 28 11:31:38 crc kubenswrapper[4923]: I1128 11:31:38.582637 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/d4b9d25a-9809-4c97-a1dd-37d779b158cf-pod-info\") pod \"rabbitmq-server-0\" (UID: \"d4b9d25a-9809-4c97-a1dd-37d779b158cf\") " pod="openstack/rabbitmq-server-0" Nov 28 11:31:38 crc kubenswrapper[4923]: I1128 11:31:38.582732 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/d4b9d25a-9809-4c97-a1dd-37d779b158cf-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"d4b9d25a-9809-4c97-a1dd-37d779b158cf\") " pod="openstack/rabbitmq-server-0" Nov 28 11:31:38 crc kubenswrapper[4923]: I1128 11:31:38.582862 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/d4b9d25a-9809-4c97-a1dd-37d779b158cf-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"d4b9d25a-9809-4c97-a1dd-37d779b158cf\") " pod="openstack/rabbitmq-server-0" Nov 28 11:31:38 crc kubenswrapper[4923]: I1128 11:31:38.582987 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/d4b9d25a-9809-4c97-a1dd-37d779b158cf-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"d4b9d25a-9809-4c97-a1dd-37d779b158cf\") " pod="openstack/rabbitmq-server-0" Nov 28 11:31:38 crc kubenswrapper[4923]: I1128 11:31:38.583101 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/d4b9d25a-9809-4c97-a1dd-37d779b158cf-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"d4b9d25a-9809-4c97-a1dd-37d779b158cf\") " pod="openstack/rabbitmq-server-0" Nov 28 11:31:38 crc kubenswrapper[4923]: I1128 11:31:38.583268 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mxm88\" (UniqueName: \"kubernetes.io/projected/d4b9d25a-9809-4c97-a1dd-37d779b158cf-kube-api-access-mxm88\") pod \"rabbitmq-server-0\" (UID: \"d4b9d25a-9809-4c97-a1dd-37d779b158cf\") " pod="openstack/rabbitmq-server-0" Nov 28 11:31:38 crc kubenswrapper[4923]: I1128 11:31:38.583377 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/d4b9d25a-9809-4c97-a1dd-37d779b158cf-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"d4b9d25a-9809-4c97-a1dd-37d779b158cf\") " pod="openstack/rabbitmq-server-0" Nov 28 11:31:38 crc kubenswrapper[4923]: I1128 11:31:38.583503 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d4b9d25a-9809-4c97-a1dd-37d779b158cf-config-data\") pod \"rabbitmq-server-0\" (UID: \"d4b9d25a-9809-4c97-a1dd-37d779b158cf\") " pod="openstack/rabbitmq-server-0" Nov 28 11:31:38 crc kubenswrapper[4923]: I1128 11:31:38.685196 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mxm88\" (UniqueName: \"kubernetes.io/projected/d4b9d25a-9809-4c97-a1dd-37d779b158cf-kube-api-access-mxm88\") pod \"rabbitmq-server-0\" (UID: \"d4b9d25a-9809-4c97-a1dd-37d779b158cf\") " pod="openstack/rabbitmq-server-0" Nov 28 11:31:38 crc kubenswrapper[4923]: I1128 11:31:38.685259 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: 
\"kubernetes.io/secret/d4b9d25a-9809-4c97-a1dd-37d779b158cf-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"d4b9d25a-9809-4c97-a1dd-37d779b158cf\") " pod="openstack/rabbitmq-server-0" Nov 28 11:31:38 crc kubenswrapper[4923]: I1128 11:31:38.685286 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d4b9d25a-9809-4c97-a1dd-37d779b158cf-config-data\") pod \"rabbitmq-server-0\" (UID: \"d4b9d25a-9809-4c97-a1dd-37d779b158cf\") " pod="openstack/rabbitmq-server-0" Nov 28 11:31:38 crc kubenswrapper[4923]: I1128 11:31:38.685316 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/d4b9d25a-9809-4c97-a1dd-37d779b158cf-server-conf\") pod \"rabbitmq-server-0\" (UID: \"d4b9d25a-9809-4c97-a1dd-37d779b158cf\") " pod="openstack/rabbitmq-server-0" Nov 28 11:31:38 crc kubenswrapper[4923]: I1128 11:31:38.685339 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-server-0\" (UID: \"d4b9d25a-9809-4c97-a1dd-37d779b158cf\") " pod="openstack/rabbitmq-server-0" Nov 28 11:31:38 crc kubenswrapper[4923]: I1128 11:31:38.685369 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/d4b9d25a-9809-4c97-a1dd-37d779b158cf-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"d4b9d25a-9809-4c97-a1dd-37d779b158cf\") " pod="openstack/rabbitmq-server-0" Nov 28 11:31:38 crc kubenswrapper[4923]: I1128 11:31:38.685395 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/d4b9d25a-9809-4c97-a1dd-37d779b158cf-pod-info\") pod \"rabbitmq-server-0\" (UID: \"d4b9d25a-9809-4c97-a1dd-37d779b158cf\") " pod="openstack/rabbitmq-server-0" Nov 28 11:31:38 crc kubenswrapper[4923]: I1128 11:31:38.685418 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/d4b9d25a-9809-4c97-a1dd-37d779b158cf-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"d4b9d25a-9809-4c97-a1dd-37d779b158cf\") " pod="openstack/rabbitmq-server-0" Nov 28 11:31:38 crc kubenswrapper[4923]: I1128 11:31:38.685476 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/d4b9d25a-9809-4c97-a1dd-37d779b158cf-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"d4b9d25a-9809-4c97-a1dd-37d779b158cf\") " pod="openstack/rabbitmq-server-0" Nov 28 11:31:38 crc kubenswrapper[4923]: I1128 11:31:38.685511 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/d4b9d25a-9809-4c97-a1dd-37d779b158cf-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"d4b9d25a-9809-4c97-a1dd-37d779b158cf\") " pod="openstack/rabbitmq-server-0" Nov 28 11:31:38 crc kubenswrapper[4923]: I1128 11:31:38.685549 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/d4b9d25a-9809-4c97-a1dd-37d779b158cf-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"d4b9d25a-9809-4c97-a1dd-37d779b158cf\") " pod="openstack/rabbitmq-server-0" Nov 28 11:31:38 crc kubenswrapper[4923]: I1128 11:31:38.686143 4923 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/d4b9d25a-9809-4c97-a1dd-37d779b158cf-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"d4b9d25a-9809-4c97-a1dd-37d779b158cf\") " pod="openstack/rabbitmq-server-0" Nov 28 11:31:38 crc kubenswrapper[4923]: I1128 11:31:38.686256 4923 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-server-0\" (UID: \"d4b9d25a-9809-4c97-a1dd-37d779b158cf\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/rabbitmq-server-0" Nov 28 11:31:38 crc kubenswrapper[4923]: I1128 11:31:38.689443 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d4b9d25a-9809-4c97-a1dd-37d779b158cf-config-data\") pod \"rabbitmq-server-0\" (UID: \"d4b9d25a-9809-4c97-a1dd-37d779b158cf\") " pod="openstack/rabbitmq-server-0" Nov 28 11:31:38 crc kubenswrapper[4923]: I1128 11:31:38.692724 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/d4b9d25a-9809-4c97-a1dd-37d779b158cf-pod-info\") pod \"rabbitmq-server-0\" (UID: \"d4b9d25a-9809-4c97-a1dd-37d779b158cf\") " pod="openstack/rabbitmq-server-0" Nov 28 11:31:38 crc kubenswrapper[4923]: I1128 11:31:38.697581 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/d4b9d25a-9809-4c97-a1dd-37d779b158cf-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"d4b9d25a-9809-4c97-a1dd-37d779b158cf\") " pod="openstack/rabbitmq-server-0" Nov 28 11:31:38 crc kubenswrapper[4923]: I1128 11:31:38.698309 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/d4b9d25a-9809-4c97-a1dd-37d779b158cf-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"d4b9d25a-9809-4c97-a1dd-37d779b158cf\") " pod="openstack/rabbitmq-server-0" Nov 28 11:31:38 crc kubenswrapper[4923]: I1128 11:31:38.698500 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/d4b9d25a-9809-4c97-a1dd-37d779b158cf-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"d4b9d25a-9809-4c97-a1dd-37d779b158cf\") " pod="openstack/rabbitmq-server-0" Nov 28 11:31:38 crc kubenswrapper[4923]: I1128 11:31:38.699111 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/d4b9d25a-9809-4c97-a1dd-37d779b158cf-server-conf\") pod \"rabbitmq-server-0\" (UID: \"d4b9d25a-9809-4c97-a1dd-37d779b158cf\") " pod="openstack/rabbitmq-server-0" Nov 28 11:31:38 crc kubenswrapper[4923]: I1128 11:31:38.699566 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/d4b9d25a-9809-4c97-a1dd-37d779b158cf-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"d4b9d25a-9809-4c97-a1dd-37d779b158cf\") " pod="openstack/rabbitmq-server-0" Nov 28 11:31:38 crc kubenswrapper[4923]: I1128 11:31:38.705669 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/d4b9d25a-9809-4c97-a1dd-37d779b158cf-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"d4b9d25a-9809-4c97-a1dd-37d779b158cf\") " pod="openstack/rabbitmq-server-0" Nov 28 
11:31:38 crc kubenswrapper[4923]: I1128 11:31:38.717417 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mxm88\" (UniqueName: \"kubernetes.io/projected/d4b9d25a-9809-4c97-a1dd-37d779b158cf-kube-api-access-mxm88\") pod \"rabbitmq-server-0\" (UID: \"d4b9d25a-9809-4c97-a1dd-37d779b158cf\") " pod="openstack/rabbitmq-server-0" Nov 28 11:31:38 crc kubenswrapper[4923]: I1128 11:31:38.775445 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-server-0\" (UID: \"d4b9d25a-9809-4c97-a1dd-37d779b158cf\") " pod="openstack/rabbitmq-server-0" Nov 28 11:31:38 crc kubenswrapper[4923]: I1128 11:31:38.798057 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 28 11:31:39 crc kubenswrapper[4923]: I1128 11:31:39.102496 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 28 11:31:39 crc kubenswrapper[4923]: I1128 11:31:39.194074 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/456d70c2-443b-455b-83fe-fc87e36534ac-plugins-conf\") pod \"456d70c2-443b-455b-83fe-fc87e36534ac\" (UID: \"456d70c2-443b-455b-83fe-fc87e36534ac\") " Nov 28 11:31:39 crc kubenswrapper[4923]: I1128 11:31:39.194488 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/456d70c2-443b-455b-83fe-fc87e36534ac-erlang-cookie-secret\") pod \"456d70c2-443b-455b-83fe-fc87e36534ac\" (UID: \"456d70c2-443b-455b-83fe-fc87e36534ac\") " Nov 28 11:31:39 crc kubenswrapper[4923]: I1128 11:31:39.194525 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/456d70c2-443b-455b-83fe-fc87e36534ac-rabbitmq-plugins\") pod \"456d70c2-443b-455b-83fe-fc87e36534ac\" (UID: \"456d70c2-443b-455b-83fe-fc87e36534ac\") " Nov 28 11:31:39 crc kubenswrapper[4923]: I1128 11:31:39.194551 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"456d70c2-443b-455b-83fe-fc87e36534ac\" (UID: \"456d70c2-443b-455b-83fe-fc87e36534ac\") " Nov 28 11:31:39 crc kubenswrapper[4923]: I1128 11:31:39.194606 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/456d70c2-443b-455b-83fe-fc87e36534ac-pod-info\") pod \"456d70c2-443b-455b-83fe-fc87e36534ac\" (UID: \"456d70c2-443b-455b-83fe-fc87e36534ac\") " Nov 28 11:31:39 crc kubenswrapper[4923]: I1128 11:31:39.194628 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/456d70c2-443b-455b-83fe-fc87e36534ac-rabbitmq-tls\") pod \"456d70c2-443b-455b-83fe-fc87e36534ac\" (UID: \"456d70c2-443b-455b-83fe-fc87e36534ac\") " Nov 28 11:31:39 crc kubenswrapper[4923]: I1128 11:31:39.194679 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/456d70c2-443b-455b-83fe-fc87e36534ac-rabbitmq-erlang-cookie\") pod \"456d70c2-443b-455b-83fe-fc87e36534ac\" (UID: \"456d70c2-443b-455b-83fe-fc87e36534ac\") " Nov 28 11:31:39 crc 
kubenswrapper[4923]: I1128 11:31:39.194742 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mkq5z\" (UniqueName: \"kubernetes.io/projected/456d70c2-443b-455b-83fe-fc87e36534ac-kube-api-access-mkq5z\") pod \"456d70c2-443b-455b-83fe-fc87e36534ac\" (UID: \"456d70c2-443b-455b-83fe-fc87e36534ac\") " Nov 28 11:31:39 crc kubenswrapper[4923]: I1128 11:31:39.194771 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/456d70c2-443b-455b-83fe-fc87e36534ac-config-data\") pod \"456d70c2-443b-455b-83fe-fc87e36534ac\" (UID: \"456d70c2-443b-455b-83fe-fc87e36534ac\") " Nov 28 11:31:39 crc kubenswrapper[4923]: I1128 11:31:39.194813 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/456d70c2-443b-455b-83fe-fc87e36534ac-rabbitmq-confd\") pod \"456d70c2-443b-455b-83fe-fc87e36534ac\" (UID: \"456d70c2-443b-455b-83fe-fc87e36534ac\") " Nov 28 11:31:39 crc kubenswrapper[4923]: I1128 11:31:39.194834 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/456d70c2-443b-455b-83fe-fc87e36534ac-server-conf\") pod \"456d70c2-443b-455b-83fe-fc87e36534ac\" (UID: \"456d70c2-443b-455b-83fe-fc87e36534ac\") " Nov 28 11:31:39 crc kubenswrapper[4923]: I1128 11:31:39.196325 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/456d70c2-443b-455b-83fe-fc87e36534ac-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "456d70c2-443b-455b-83fe-fc87e36534ac" (UID: "456d70c2-443b-455b-83fe-fc87e36534ac"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:31:39 crc kubenswrapper[4923]: I1128 11:31:39.198792 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="26a4b167-a30a-4655-80aa-2177fe14784c" path="/var/lib/kubelet/pods/26a4b167-a30a-4655-80aa-2177fe14784c/volumes" Nov 28 11:31:39 crc kubenswrapper[4923]: I1128 11:31:39.220425 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/456d70c2-443b-455b-83fe-fc87e36534ac-kube-api-access-mkq5z" (OuterVolumeSpecName: "kube-api-access-mkq5z") pod "456d70c2-443b-455b-83fe-fc87e36534ac" (UID: "456d70c2-443b-455b-83fe-fc87e36534ac"). InnerVolumeSpecName "kube-api-access-mkq5z". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:31:39 crc kubenswrapper[4923]: I1128 11:31:39.222609 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage12-crc" (OuterVolumeSpecName: "persistence") pod "456d70c2-443b-455b-83fe-fc87e36534ac" (UID: "456d70c2-443b-455b-83fe-fc87e36534ac"). InnerVolumeSpecName "local-storage12-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 28 11:31:39 crc kubenswrapper[4923]: I1128 11:31:39.222683 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/456d70c2-443b-455b-83fe-fc87e36534ac-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "456d70c2-443b-455b-83fe-fc87e36534ac" (UID: "456d70c2-443b-455b-83fe-fc87e36534ac"). InnerVolumeSpecName "rabbitmq-tls". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:31:39 crc kubenswrapper[4923]: I1128 11:31:39.223492 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/456d70c2-443b-455b-83fe-fc87e36534ac-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "456d70c2-443b-455b-83fe-fc87e36534ac" (UID: "456d70c2-443b-455b-83fe-fc87e36534ac"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:31:39 crc kubenswrapper[4923]: I1128 11:31:39.223605 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/456d70c2-443b-455b-83fe-fc87e36534ac-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "456d70c2-443b-455b-83fe-fc87e36534ac" (UID: "456d70c2-443b-455b-83fe-fc87e36534ac"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:31:39 crc kubenswrapper[4923]: I1128 11:31:39.223730 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/456d70c2-443b-455b-83fe-fc87e36534ac-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "456d70c2-443b-455b-83fe-fc87e36534ac" (UID: "456d70c2-443b-455b-83fe-fc87e36534ac"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:31:39 crc kubenswrapper[4923]: I1128 11:31:39.224046 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/456d70c2-443b-455b-83fe-fc87e36534ac-pod-info" (OuterVolumeSpecName: "pod-info") pod "456d70c2-443b-455b-83fe-fc87e36534ac" (UID: "456d70c2-443b-455b-83fe-fc87e36534ac"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Nov 28 11:31:39 crc kubenswrapper[4923]: I1128 11:31:39.257009 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/456d70c2-443b-455b-83fe-fc87e36534ac-config-data" (OuterVolumeSpecName: "config-data") pod "456d70c2-443b-455b-83fe-fc87e36534ac" (UID: "456d70c2-443b-455b-83fe-fc87e36534ac"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:31:39 crc kubenswrapper[4923]: I1128 11:31:39.266887 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/456d70c2-443b-455b-83fe-fc87e36534ac-server-conf" (OuterVolumeSpecName: "server-conf") pod "456d70c2-443b-455b-83fe-fc87e36534ac" (UID: "456d70c2-443b-455b-83fe-fc87e36534ac"). InnerVolumeSpecName "server-conf". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:31:39 crc kubenswrapper[4923]: I1128 11:31:39.298739 4923 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/456d70c2-443b-455b-83fe-fc87e36534ac-pod-info\") on node \"crc\" DevicePath \"\"" Nov 28 11:31:39 crc kubenswrapper[4923]: I1128 11:31:39.298768 4923 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/456d70c2-443b-455b-83fe-fc87e36534ac-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Nov 28 11:31:39 crc kubenswrapper[4923]: I1128 11:31:39.298780 4923 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/456d70c2-443b-455b-83fe-fc87e36534ac-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Nov 28 11:31:39 crc kubenswrapper[4923]: I1128 11:31:39.298791 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mkq5z\" (UniqueName: \"kubernetes.io/projected/456d70c2-443b-455b-83fe-fc87e36534ac-kube-api-access-mkq5z\") on node \"crc\" DevicePath \"\"" Nov 28 11:31:39 crc kubenswrapper[4923]: I1128 11:31:39.298800 4923 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/456d70c2-443b-455b-83fe-fc87e36534ac-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 11:31:39 crc kubenswrapper[4923]: I1128 11:31:39.298808 4923 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/456d70c2-443b-455b-83fe-fc87e36534ac-server-conf\") on node \"crc\" DevicePath \"\"" Nov 28 11:31:39 crc kubenswrapper[4923]: I1128 11:31:39.298816 4923 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/456d70c2-443b-455b-83fe-fc87e36534ac-plugins-conf\") on node \"crc\" DevicePath \"\"" Nov 28 11:31:39 crc kubenswrapper[4923]: I1128 11:31:39.298823 4923 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/456d70c2-443b-455b-83fe-fc87e36534ac-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Nov 28 11:31:39 crc kubenswrapper[4923]: I1128 11:31:39.298831 4923 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/456d70c2-443b-455b-83fe-fc87e36534ac-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Nov 28 11:31:39 crc kubenswrapper[4923]: I1128 11:31:39.298850 4923 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" " Nov 28 11:31:39 crc kubenswrapper[4923]: I1128 11:31:39.333629 4923 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage12-crc" (UniqueName: "kubernetes.io/local-volume/local-storage12-crc") on node "crc" Nov 28 11:31:39 crc kubenswrapper[4923]: W1128 11:31:39.333718 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd4b9d25a_9809_4c97_a1dd_37d779b158cf.slice/crio-c36ae13c686ecf35af4f8936304710481f9975346f47b4b5dca3739d8ca081ae WatchSource:0}: Error finding container c36ae13c686ecf35af4f8936304710481f9975346f47b4b5dca3739d8ca081ae: Status 404 returned error can't find the container with id c36ae13c686ecf35af4f8936304710481f9975346f47b4b5dca3739d8ca081ae Nov 28 11:31:39 crc kubenswrapper[4923]: I1128 
11:31:39.339270 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 28 11:31:39 crc kubenswrapper[4923]: I1128 11:31:39.358294 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"d4b9d25a-9809-4c97-a1dd-37d779b158cf","Type":"ContainerStarted","Data":"c36ae13c686ecf35af4f8936304710481f9975346f47b4b5dca3739d8ca081ae"} Nov 28 11:31:39 crc kubenswrapper[4923]: I1128 11:31:39.359737 4923 generic.go:334] "Generic (PLEG): container finished" podID="456d70c2-443b-455b-83fe-fc87e36534ac" containerID="0adef79547a29cf58c840ff6fe32e02579298f45c778018b82629cb1f6d2e4e8" exitCode=0 Nov 28 11:31:39 crc kubenswrapper[4923]: I1128 11:31:39.359776 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"456d70c2-443b-455b-83fe-fc87e36534ac","Type":"ContainerDied","Data":"0adef79547a29cf58c840ff6fe32e02579298f45c778018b82629cb1f6d2e4e8"} Nov 28 11:31:39 crc kubenswrapper[4923]: I1128 11:31:39.359792 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"456d70c2-443b-455b-83fe-fc87e36534ac","Type":"ContainerDied","Data":"5ffc4ee04064bb80be9d39e3dc205bb638951a4d3a75fe73e58349463458df24"} Nov 28 11:31:39 crc kubenswrapper[4923]: I1128 11:31:39.359807 4923 scope.go:117] "RemoveContainer" containerID="0adef79547a29cf58c840ff6fe32e02579298f45c778018b82629cb1f6d2e4e8" Nov 28 11:31:39 crc kubenswrapper[4923]: I1128 11:31:39.359905 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 28 11:31:39 crc kubenswrapper[4923]: I1128 11:31:39.368580 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/456d70c2-443b-455b-83fe-fc87e36534ac-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "456d70c2-443b-455b-83fe-fc87e36534ac" (UID: "456d70c2-443b-455b-83fe-fc87e36534ac"). InnerVolumeSpecName "rabbitmq-confd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:31:39 crc kubenswrapper[4923]: I1128 11:31:39.400295 4923 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/456d70c2-443b-455b-83fe-fc87e36534ac-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Nov 28 11:31:39 crc kubenswrapper[4923]: I1128 11:31:39.400323 4923 reconciler_common.go:293] "Volume detached for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" DevicePath \"\"" Nov 28 11:31:39 crc kubenswrapper[4923]: I1128 11:31:39.400445 4923 scope.go:117] "RemoveContainer" containerID="6b9977387f4a04660289708811d6e9fd63ab44d05d56ad4d5f94de24f39428d6" Nov 28 11:31:39 crc kubenswrapper[4923]: I1128 11:31:39.422602 4923 scope.go:117] "RemoveContainer" containerID="0adef79547a29cf58c840ff6fe32e02579298f45c778018b82629cb1f6d2e4e8" Nov 28 11:31:39 crc kubenswrapper[4923]: E1128 11:31:39.425091 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0adef79547a29cf58c840ff6fe32e02579298f45c778018b82629cb1f6d2e4e8\": container with ID starting with 0adef79547a29cf58c840ff6fe32e02579298f45c778018b82629cb1f6d2e4e8 not found: ID does not exist" containerID="0adef79547a29cf58c840ff6fe32e02579298f45c778018b82629cb1f6d2e4e8" Nov 28 11:31:39 crc kubenswrapper[4923]: I1128 11:31:39.425178 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0adef79547a29cf58c840ff6fe32e02579298f45c778018b82629cb1f6d2e4e8"} err="failed to get container status \"0adef79547a29cf58c840ff6fe32e02579298f45c778018b82629cb1f6d2e4e8\": rpc error: code = NotFound desc = could not find container \"0adef79547a29cf58c840ff6fe32e02579298f45c778018b82629cb1f6d2e4e8\": container with ID starting with 0adef79547a29cf58c840ff6fe32e02579298f45c778018b82629cb1f6d2e4e8 not found: ID does not exist" Nov 28 11:31:39 crc kubenswrapper[4923]: I1128 11:31:39.425208 4923 scope.go:117] "RemoveContainer" containerID="6b9977387f4a04660289708811d6e9fd63ab44d05d56ad4d5f94de24f39428d6" Nov 28 11:31:39 crc kubenswrapper[4923]: E1128 11:31:39.425642 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6b9977387f4a04660289708811d6e9fd63ab44d05d56ad4d5f94de24f39428d6\": container with ID starting with 6b9977387f4a04660289708811d6e9fd63ab44d05d56ad4d5f94de24f39428d6 not found: ID does not exist" containerID="6b9977387f4a04660289708811d6e9fd63ab44d05d56ad4d5f94de24f39428d6" Nov 28 11:31:39 crc kubenswrapper[4923]: I1128 11:31:39.425683 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6b9977387f4a04660289708811d6e9fd63ab44d05d56ad4d5f94de24f39428d6"} err="failed to get container status \"6b9977387f4a04660289708811d6e9fd63ab44d05d56ad4d5f94de24f39428d6\": rpc error: code = NotFound desc = could not find container \"6b9977387f4a04660289708811d6e9fd63ab44d05d56ad4d5f94de24f39428d6\": container with ID starting with 6b9977387f4a04660289708811d6e9fd63ab44d05d56ad4d5f94de24f39428d6 not found: ID does not exist" Nov 28 11:31:39 crc kubenswrapper[4923]: I1128 11:31:39.697692 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 28 11:31:39 crc kubenswrapper[4923]: I1128 11:31:39.704762 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 28 11:31:39 
crc kubenswrapper[4923]: I1128 11:31:39.737393 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 28 11:31:39 crc kubenswrapper[4923]: E1128 11:31:39.737825 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="456d70c2-443b-455b-83fe-fc87e36534ac" containerName="rabbitmq" Nov 28 11:31:39 crc kubenswrapper[4923]: I1128 11:31:39.737839 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="456d70c2-443b-455b-83fe-fc87e36534ac" containerName="rabbitmq" Nov 28 11:31:39 crc kubenswrapper[4923]: E1128 11:31:39.737848 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="456d70c2-443b-455b-83fe-fc87e36534ac" containerName="setup-container" Nov 28 11:31:39 crc kubenswrapper[4923]: I1128 11:31:39.737854 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="456d70c2-443b-455b-83fe-fc87e36534ac" containerName="setup-container" Nov 28 11:31:39 crc kubenswrapper[4923]: I1128 11:31:39.738036 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="456d70c2-443b-455b-83fe-fc87e36534ac" containerName="rabbitmq" Nov 28 11:31:39 crc kubenswrapper[4923]: I1128 11:31:39.738983 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 28 11:31:39 crc kubenswrapper[4923]: I1128 11:31:39.743792 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Nov 28 11:31:39 crc kubenswrapper[4923]: I1128 11:31:39.743809 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Nov 28 11:31:39 crc kubenswrapper[4923]: I1128 11:31:39.743897 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Nov 28 11:31:39 crc kubenswrapper[4923]: I1128 11:31:39.744024 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Nov 28 11:31:39 crc kubenswrapper[4923]: I1128 11:31:39.744204 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Nov 28 11:31:39 crc kubenswrapper[4923]: I1128 11:31:39.744313 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-5xc6j" Nov 28 11:31:39 crc kubenswrapper[4923]: I1128 11:31:39.744512 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Nov 28 11:31:39 crc kubenswrapper[4923]: I1128 11:31:39.749722 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 28 11:31:39 crc kubenswrapper[4923]: I1128 11:31:39.908455 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/66114922-3d2e-40e1-9d35-84b0960ea5a2-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"66114922-3d2e-40e1-9d35-84b0960ea5a2\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 11:31:39 crc kubenswrapper[4923]: I1128 11:31:39.908511 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/66114922-3d2e-40e1-9d35-84b0960ea5a2-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"66114922-3d2e-40e1-9d35-84b0960ea5a2\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 11:31:39 crc kubenswrapper[4923]: I1128 11:31:39.908624 4923 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"66114922-3d2e-40e1-9d35-84b0960ea5a2\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 11:31:39 crc kubenswrapper[4923]: I1128 11:31:39.908650 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/66114922-3d2e-40e1-9d35-84b0960ea5a2-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"66114922-3d2e-40e1-9d35-84b0960ea5a2\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 11:31:39 crc kubenswrapper[4923]: I1128 11:31:39.908724 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/66114922-3d2e-40e1-9d35-84b0960ea5a2-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"66114922-3d2e-40e1-9d35-84b0960ea5a2\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 11:31:39 crc kubenswrapper[4923]: I1128 11:31:39.908750 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/66114922-3d2e-40e1-9d35-84b0960ea5a2-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"66114922-3d2e-40e1-9d35-84b0960ea5a2\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 11:31:39 crc kubenswrapper[4923]: I1128 11:31:39.908782 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6jv4p\" (UniqueName: \"kubernetes.io/projected/66114922-3d2e-40e1-9d35-84b0960ea5a2-kube-api-access-6jv4p\") pod \"rabbitmq-cell1-server-0\" (UID: \"66114922-3d2e-40e1-9d35-84b0960ea5a2\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 11:31:39 crc kubenswrapper[4923]: I1128 11:31:39.908815 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/66114922-3d2e-40e1-9d35-84b0960ea5a2-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"66114922-3d2e-40e1-9d35-84b0960ea5a2\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 11:31:39 crc kubenswrapper[4923]: I1128 11:31:39.908836 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/66114922-3d2e-40e1-9d35-84b0960ea5a2-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"66114922-3d2e-40e1-9d35-84b0960ea5a2\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 11:31:39 crc kubenswrapper[4923]: I1128 11:31:39.909094 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/66114922-3d2e-40e1-9d35-84b0960ea5a2-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"66114922-3d2e-40e1-9d35-84b0960ea5a2\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 11:31:39 crc kubenswrapper[4923]: I1128 11:31:39.909217 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/66114922-3d2e-40e1-9d35-84b0960ea5a2-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"66114922-3d2e-40e1-9d35-84b0960ea5a2\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 11:31:40 crc kubenswrapper[4923]: I1128 11:31:40.011408 
4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"66114922-3d2e-40e1-9d35-84b0960ea5a2\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 11:31:40 crc kubenswrapper[4923]: I1128 11:31:40.011466 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/66114922-3d2e-40e1-9d35-84b0960ea5a2-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"66114922-3d2e-40e1-9d35-84b0960ea5a2\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 11:31:40 crc kubenswrapper[4923]: I1128 11:31:40.011572 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/66114922-3d2e-40e1-9d35-84b0960ea5a2-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"66114922-3d2e-40e1-9d35-84b0960ea5a2\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 11:31:40 crc kubenswrapper[4923]: I1128 11:31:40.011607 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/66114922-3d2e-40e1-9d35-84b0960ea5a2-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"66114922-3d2e-40e1-9d35-84b0960ea5a2\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 11:31:40 crc kubenswrapper[4923]: I1128 11:31:40.011645 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6jv4p\" (UniqueName: \"kubernetes.io/projected/66114922-3d2e-40e1-9d35-84b0960ea5a2-kube-api-access-6jv4p\") pod \"rabbitmq-cell1-server-0\" (UID: \"66114922-3d2e-40e1-9d35-84b0960ea5a2\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 11:31:40 crc kubenswrapper[4923]: I1128 11:31:40.011688 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/66114922-3d2e-40e1-9d35-84b0960ea5a2-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"66114922-3d2e-40e1-9d35-84b0960ea5a2\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 11:31:40 crc kubenswrapper[4923]: I1128 11:31:40.011711 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/66114922-3d2e-40e1-9d35-84b0960ea5a2-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"66114922-3d2e-40e1-9d35-84b0960ea5a2\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 11:31:40 crc kubenswrapper[4923]: I1128 11:31:40.011749 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/66114922-3d2e-40e1-9d35-84b0960ea5a2-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"66114922-3d2e-40e1-9d35-84b0960ea5a2\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 11:31:40 crc kubenswrapper[4923]: I1128 11:31:40.011783 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/66114922-3d2e-40e1-9d35-84b0960ea5a2-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"66114922-3d2e-40e1-9d35-84b0960ea5a2\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 11:31:40 crc kubenswrapper[4923]: I1128 11:31:40.011842 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: 
\"kubernetes.io/empty-dir/66114922-3d2e-40e1-9d35-84b0960ea5a2-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"66114922-3d2e-40e1-9d35-84b0960ea5a2\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 11:31:40 crc kubenswrapper[4923]: I1128 11:31:40.011856 4923 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"66114922-3d2e-40e1-9d35-84b0960ea5a2\") device mount path \"/mnt/openstack/pv12\"" pod="openstack/rabbitmq-cell1-server-0" Nov 28 11:31:40 crc kubenswrapper[4923]: I1128 11:31:40.012308 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/66114922-3d2e-40e1-9d35-84b0960ea5a2-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"66114922-3d2e-40e1-9d35-84b0960ea5a2\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 11:31:40 crc kubenswrapper[4923]: I1128 11:31:40.012796 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/66114922-3d2e-40e1-9d35-84b0960ea5a2-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"66114922-3d2e-40e1-9d35-84b0960ea5a2\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 11:31:40 crc kubenswrapper[4923]: I1128 11:31:40.013212 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/66114922-3d2e-40e1-9d35-84b0960ea5a2-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"66114922-3d2e-40e1-9d35-84b0960ea5a2\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 11:31:40 crc kubenswrapper[4923]: I1128 11:31:40.011869 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/66114922-3d2e-40e1-9d35-84b0960ea5a2-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"66114922-3d2e-40e1-9d35-84b0960ea5a2\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 11:31:40 crc kubenswrapper[4923]: I1128 11:31:40.013389 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/66114922-3d2e-40e1-9d35-84b0960ea5a2-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"66114922-3d2e-40e1-9d35-84b0960ea5a2\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 11:31:40 crc kubenswrapper[4923]: I1128 11:31:40.013553 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/66114922-3d2e-40e1-9d35-84b0960ea5a2-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"66114922-3d2e-40e1-9d35-84b0960ea5a2\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 11:31:40 crc kubenswrapper[4923]: I1128 11:31:40.019314 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/66114922-3d2e-40e1-9d35-84b0960ea5a2-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"66114922-3d2e-40e1-9d35-84b0960ea5a2\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 11:31:40 crc kubenswrapper[4923]: I1128 11:31:40.026291 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/66114922-3d2e-40e1-9d35-84b0960ea5a2-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"66114922-3d2e-40e1-9d35-84b0960ea5a2\") " 
pod="openstack/rabbitmq-cell1-server-0" Nov 28 11:31:40 crc kubenswrapper[4923]: I1128 11:31:40.035479 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/66114922-3d2e-40e1-9d35-84b0960ea5a2-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"66114922-3d2e-40e1-9d35-84b0960ea5a2\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 11:31:40 crc kubenswrapper[4923]: I1128 11:31:40.046553 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6jv4p\" (UniqueName: \"kubernetes.io/projected/66114922-3d2e-40e1-9d35-84b0960ea5a2-kube-api-access-6jv4p\") pod \"rabbitmq-cell1-server-0\" (UID: \"66114922-3d2e-40e1-9d35-84b0960ea5a2\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 11:31:40 crc kubenswrapper[4923]: I1128 11:31:40.048095 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/66114922-3d2e-40e1-9d35-84b0960ea5a2-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"66114922-3d2e-40e1-9d35-84b0960ea5a2\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 11:31:40 crc kubenswrapper[4923]: I1128 11:31:40.063215 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"66114922-3d2e-40e1-9d35-84b0960ea5a2\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 11:31:40 crc kubenswrapper[4923]: I1128 11:31:40.356261 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 28 11:31:40 crc kubenswrapper[4923]: I1128 11:31:40.829610 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 28 11:31:41 crc kubenswrapper[4923]: I1128 11:31:41.180957 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="456d70c2-443b-455b-83fe-fc87e36534ac" path="/var/lib/kubelet/pods/456d70c2-443b-455b-83fe-fc87e36534ac/volumes" Nov 28 11:31:41 crc kubenswrapper[4923]: I1128 11:31:41.391311 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"d4b9d25a-9809-4c97-a1dd-37d779b158cf","Type":"ContainerStarted","Data":"7dff06c4c961f84b1460d76aa8bb41ad2e0bf5290103a6445a562e76ea605613"} Nov 28 11:31:41 crc kubenswrapper[4923]: I1128 11:31:41.400992 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"66114922-3d2e-40e1-9d35-84b0960ea5a2","Type":"ContainerStarted","Data":"f3d0319db7006ba58eacaf00c8f203d931e433398f6c441cd960e26119844edf"} Nov 28 11:31:42 crc kubenswrapper[4923]: I1128 11:31:42.414831 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"66114922-3d2e-40e1-9d35-84b0960ea5a2","Type":"ContainerStarted","Data":"1feca1c933fef747281644fff150beb21f017deb7ec2623f90cf82e58cbe2c36"} Nov 28 11:31:44 crc kubenswrapper[4923]: I1128 11:31:44.026286 4923 patch_prober.go:28] interesting pod/machine-config-daemon-bwdth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 11:31:44 crc kubenswrapper[4923]: I1128 11:31:44.026620 4923 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" 
podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 11:31:44 crc kubenswrapper[4923]: I1128 11:31:44.026655 4923 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" Nov 28 11:31:44 crc kubenswrapper[4923]: I1128 11:31:44.027376 4923 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"d1a2e1beb233079a250c29730400b1c9cdbf26210af36136b746e09631ce81a5"} pod="openshift-machine-config-operator/machine-config-daemon-bwdth" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 11:31:44 crc kubenswrapper[4923]: I1128 11:31:44.027440 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" containerName="machine-config-daemon" containerID="cri-o://d1a2e1beb233079a250c29730400b1c9cdbf26210af36136b746e09631ce81a5" gracePeriod=600 Nov 28 11:31:44 crc kubenswrapper[4923]: I1128 11:31:44.435855 4923 generic.go:334] "Generic (PLEG): container finished" podID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" containerID="d1a2e1beb233079a250c29730400b1c9cdbf26210af36136b746e09631ce81a5" exitCode=0 Nov 28 11:31:44 crc kubenswrapper[4923]: I1128 11:31:44.436099 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" event={"ID":"092566f7-fc7d-4897-a1f2-4ecedcd3058e","Type":"ContainerDied","Data":"d1a2e1beb233079a250c29730400b1c9cdbf26210af36136b746e09631ce81a5"} Nov 28 11:31:44 crc kubenswrapper[4923]: I1128 11:31:44.436237 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" event={"ID":"092566f7-fc7d-4897-a1f2-4ecedcd3058e","Type":"ContainerStarted","Data":"59e9391c4a472ec90ba5872638acc6cc579bc7ad3d795096b3c915356fd4186a"} Nov 28 11:31:44 crc kubenswrapper[4923]: I1128 11:31:44.436259 4923 scope.go:117] "RemoveContainer" containerID="e854d096d0336c4d9ad4dac3da4cdf01df8dfe8d9a2f05530bd236f4a045e2f0" Nov 28 11:31:46 crc kubenswrapper[4923]: I1128 11:31:46.582457 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6447ccbd8f-bqzmr"] Nov 28 11:31:46 crc kubenswrapper[4923]: I1128 11:31:46.584400 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6447ccbd8f-bqzmr" Nov 28 11:31:46 crc kubenswrapper[4923]: I1128 11:31:46.587054 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-edpm-ipam" Nov 28 11:31:46 crc kubenswrapper[4923]: I1128 11:31:46.598537 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6447ccbd8f-bqzmr"] Nov 28 11:31:46 crc kubenswrapper[4923]: I1128 11:31:46.747857 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9c056b3a-4b2a-4da3-bfc9-5d6812f5283f-config\") pod \"dnsmasq-dns-6447ccbd8f-bqzmr\" (UID: \"9c056b3a-4b2a-4da3-bfc9-5d6812f5283f\") " pod="openstack/dnsmasq-dns-6447ccbd8f-bqzmr" Nov 28 11:31:46 crc kubenswrapper[4923]: I1128 11:31:46.748185 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/9c056b3a-4b2a-4da3-bfc9-5d6812f5283f-openstack-edpm-ipam\") pod \"dnsmasq-dns-6447ccbd8f-bqzmr\" (UID: \"9c056b3a-4b2a-4da3-bfc9-5d6812f5283f\") " pod="openstack/dnsmasq-dns-6447ccbd8f-bqzmr" Nov 28 11:31:46 crc kubenswrapper[4923]: I1128 11:31:46.748243 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9c056b3a-4b2a-4da3-bfc9-5d6812f5283f-ovsdbserver-nb\") pod \"dnsmasq-dns-6447ccbd8f-bqzmr\" (UID: \"9c056b3a-4b2a-4da3-bfc9-5d6812f5283f\") " pod="openstack/dnsmasq-dns-6447ccbd8f-bqzmr" Nov 28 11:31:46 crc kubenswrapper[4923]: I1128 11:31:46.748280 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w2gvr\" (UniqueName: \"kubernetes.io/projected/9c056b3a-4b2a-4da3-bfc9-5d6812f5283f-kube-api-access-w2gvr\") pod \"dnsmasq-dns-6447ccbd8f-bqzmr\" (UID: \"9c056b3a-4b2a-4da3-bfc9-5d6812f5283f\") " pod="openstack/dnsmasq-dns-6447ccbd8f-bqzmr" Nov 28 11:31:46 crc kubenswrapper[4923]: I1128 11:31:46.748302 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9c056b3a-4b2a-4da3-bfc9-5d6812f5283f-dns-svc\") pod \"dnsmasq-dns-6447ccbd8f-bqzmr\" (UID: \"9c056b3a-4b2a-4da3-bfc9-5d6812f5283f\") " pod="openstack/dnsmasq-dns-6447ccbd8f-bqzmr" Nov 28 11:31:46 crc kubenswrapper[4923]: I1128 11:31:46.748337 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9c056b3a-4b2a-4da3-bfc9-5d6812f5283f-ovsdbserver-sb\") pod \"dnsmasq-dns-6447ccbd8f-bqzmr\" (UID: \"9c056b3a-4b2a-4da3-bfc9-5d6812f5283f\") " pod="openstack/dnsmasq-dns-6447ccbd8f-bqzmr" Nov 28 11:31:46 crc kubenswrapper[4923]: I1128 11:31:46.849423 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w2gvr\" (UniqueName: \"kubernetes.io/projected/9c056b3a-4b2a-4da3-bfc9-5d6812f5283f-kube-api-access-w2gvr\") pod \"dnsmasq-dns-6447ccbd8f-bqzmr\" (UID: \"9c056b3a-4b2a-4da3-bfc9-5d6812f5283f\") " pod="openstack/dnsmasq-dns-6447ccbd8f-bqzmr" Nov 28 11:31:46 crc kubenswrapper[4923]: I1128 11:31:46.849478 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9c056b3a-4b2a-4da3-bfc9-5d6812f5283f-dns-svc\") pod \"dnsmasq-dns-6447ccbd8f-bqzmr\" (UID: 
\"9c056b3a-4b2a-4da3-bfc9-5d6812f5283f\") " pod="openstack/dnsmasq-dns-6447ccbd8f-bqzmr" Nov 28 11:31:46 crc kubenswrapper[4923]: I1128 11:31:46.849534 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9c056b3a-4b2a-4da3-bfc9-5d6812f5283f-ovsdbserver-sb\") pod \"dnsmasq-dns-6447ccbd8f-bqzmr\" (UID: \"9c056b3a-4b2a-4da3-bfc9-5d6812f5283f\") " pod="openstack/dnsmasq-dns-6447ccbd8f-bqzmr" Nov 28 11:31:46 crc kubenswrapper[4923]: I1128 11:31:46.849587 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9c056b3a-4b2a-4da3-bfc9-5d6812f5283f-config\") pod \"dnsmasq-dns-6447ccbd8f-bqzmr\" (UID: \"9c056b3a-4b2a-4da3-bfc9-5d6812f5283f\") " pod="openstack/dnsmasq-dns-6447ccbd8f-bqzmr" Nov 28 11:31:46 crc kubenswrapper[4923]: I1128 11:31:46.849659 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/9c056b3a-4b2a-4da3-bfc9-5d6812f5283f-openstack-edpm-ipam\") pod \"dnsmasq-dns-6447ccbd8f-bqzmr\" (UID: \"9c056b3a-4b2a-4da3-bfc9-5d6812f5283f\") " pod="openstack/dnsmasq-dns-6447ccbd8f-bqzmr" Nov 28 11:31:46 crc kubenswrapper[4923]: I1128 11:31:46.849704 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9c056b3a-4b2a-4da3-bfc9-5d6812f5283f-ovsdbserver-nb\") pod \"dnsmasq-dns-6447ccbd8f-bqzmr\" (UID: \"9c056b3a-4b2a-4da3-bfc9-5d6812f5283f\") " pod="openstack/dnsmasq-dns-6447ccbd8f-bqzmr" Nov 28 11:31:46 crc kubenswrapper[4923]: I1128 11:31:46.850503 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9c056b3a-4b2a-4da3-bfc9-5d6812f5283f-ovsdbserver-sb\") pod \"dnsmasq-dns-6447ccbd8f-bqzmr\" (UID: \"9c056b3a-4b2a-4da3-bfc9-5d6812f5283f\") " pod="openstack/dnsmasq-dns-6447ccbd8f-bqzmr" Nov 28 11:31:46 crc kubenswrapper[4923]: I1128 11:31:46.850603 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9c056b3a-4b2a-4da3-bfc9-5d6812f5283f-dns-svc\") pod \"dnsmasq-dns-6447ccbd8f-bqzmr\" (UID: \"9c056b3a-4b2a-4da3-bfc9-5d6812f5283f\") " pod="openstack/dnsmasq-dns-6447ccbd8f-bqzmr" Nov 28 11:31:46 crc kubenswrapper[4923]: I1128 11:31:46.850874 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9c056b3a-4b2a-4da3-bfc9-5d6812f5283f-config\") pod \"dnsmasq-dns-6447ccbd8f-bqzmr\" (UID: \"9c056b3a-4b2a-4da3-bfc9-5d6812f5283f\") " pod="openstack/dnsmasq-dns-6447ccbd8f-bqzmr" Nov 28 11:31:46 crc kubenswrapper[4923]: I1128 11:31:46.851184 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9c056b3a-4b2a-4da3-bfc9-5d6812f5283f-ovsdbserver-nb\") pod \"dnsmasq-dns-6447ccbd8f-bqzmr\" (UID: \"9c056b3a-4b2a-4da3-bfc9-5d6812f5283f\") " pod="openstack/dnsmasq-dns-6447ccbd8f-bqzmr" Nov 28 11:31:46 crc kubenswrapper[4923]: I1128 11:31:46.851216 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/9c056b3a-4b2a-4da3-bfc9-5d6812f5283f-openstack-edpm-ipam\") pod \"dnsmasq-dns-6447ccbd8f-bqzmr\" (UID: \"9c056b3a-4b2a-4da3-bfc9-5d6812f5283f\") " pod="openstack/dnsmasq-dns-6447ccbd8f-bqzmr" Nov 28 11:31:46 crc 
kubenswrapper[4923]: I1128 11:31:46.872485 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w2gvr\" (UniqueName: \"kubernetes.io/projected/9c056b3a-4b2a-4da3-bfc9-5d6812f5283f-kube-api-access-w2gvr\") pod \"dnsmasq-dns-6447ccbd8f-bqzmr\" (UID: \"9c056b3a-4b2a-4da3-bfc9-5d6812f5283f\") " pod="openstack/dnsmasq-dns-6447ccbd8f-bqzmr" Nov 28 11:31:46 crc kubenswrapper[4923]: I1128 11:31:46.900779 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6447ccbd8f-bqzmr" Nov 28 11:31:47 crc kubenswrapper[4923]: W1128 11:31:47.425014 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9c056b3a_4b2a_4da3_bfc9_5d6812f5283f.slice/crio-9c5b4900d97b3357ec128349b1ae99e2f0d4801eac5c2835136fa17d6eda11ab WatchSource:0}: Error finding container 9c5b4900d97b3357ec128349b1ae99e2f0d4801eac5c2835136fa17d6eda11ab: Status 404 returned error can't find the container with id 9c5b4900d97b3357ec128349b1ae99e2f0d4801eac5c2835136fa17d6eda11ab Nov 28 11:31:47 crc kubenswrapper[4923]: I1128 11:31:47.427280 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6447ccbd8f-bqzmr"] Nov 28 11:31:47 crc kubenswrapper[4923]: I1128 11:31:47.467584 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6447ccbd8f-bqzmr" event={"ID":"9c056b3a-4b2a-4da3-bfc9-5d6812f5283f","Type":"ContainerStarted","Data":"9c5b4900d97b3357ec128349b1ae99e2f0d4801eac5c2835136fa17d6eda11ab"} Nov 28 11:31:48 crc kubenswrapper[4923]: I1128 11:31:48.481531 4923 generic.go:334] "Generic (PLEG): container finished" podID="9c056b3a-4b2a-4da3-bfc9-5d6812f5283f" containerID="9206d98a152b173452119b7bea70e7cb0ff3cdb8f47892bd2edf2556cf393ed8" exitCode=0 Nov 28 11:31:48 crc kubenswrapper[4923]: I1128 11:31:48.483052 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6447ccbd8f-bqzmr" event={"ID":"9c056b3a-4b2a-4da3-bfc9-5d6812f5283f","Type":"ContainerDied","Data":"9206d98a152b173452119b7bea70e7cb0ff3cdb8f47892bd2edf2556cf393ed8"} Nov 28 11:31:49 crc kubenswrapper[4923]: I1128 11:31:49.502335 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6447ccbd8f-bqzmr" event={"ID":"9c056b3a-4b2a-4da3-bfc9-5d6812f5283f","Type":"ContainerStarted","Data":"f8204e27252577219d4330cc2bc1ba567c27229859ccb8876b83271e5588e19b"} Nov 28 11:31:49 crc kubenswrapper[4923]: I1128 11:31:49.503158 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6447ccbd8f-bqzmr" Nov 28 11:31:49 crc kubenswrapper[4923]: I1128 11:31:49.528032 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6447ccbd8f-bqzmr" podStartSLOduration=3.52801569 podStartE2EDuration="3.52801569s" podCreationTimestamp="2025-11-28 11:31:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:31:49.522560847 +0000 UTC m=+1388.651245067" watchObservedRunningTime="2025-11-28 11:31:49.52801569 +0000 UTC m=+1388.656699910" Nov 28 11:31:56 crc kubenswrapper[4923]: I1128 11:31:56.903215 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6447ccbd8f-bqzmr" Nov 28 11:31:56 crc kubenswrapper[4923]: I1128 11:31:56.980341 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5b856c5697-rqrzr"] 
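Between 11:31:46 and 11:31:56 the replacement dnsmasq pod runs the whole startup pipeline visible above: SyncLoop ADD, configmap and projected volumes attached and mounted, a fresh sandbox created, an init container run to completion, the dnsmasq-dns container started, and finally the readiness probe flipping to "ready" (podStartSLOduration=3.52801569 is the startup-latency tracker's measure of that end-to-end window). A minimal client-go sketch that watches the same namespace and prints the ADDED/MODIFIED/DELETED events that surface in this log as SyncLoop ADD/UPDATE/DELETE; the kubeconfig path is an assumption for illustration:

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/root/.kube/config") // assumed path
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// Watch pod lifecycle events in the openstack namespace, the API-side
	// counterpart of the kubelet's SyncLoop ADD/UPDATE/DELETE entries.
	w, err := cs.CoreV1().Pods("openstack").Watch(context.Background(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	defer w.Stop()
	for ev := range w.ResultChan() {
		fmt.Println("event:", ev.Type)
	}
}

Note the ordering in the entries that follow: the DELETE of the old dnsmasq-dns-5b856c5697-rqrzr pod only arrives after the new pod reports ready, the usual rolling-replacement sequence.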
Nov 28 11:31:56 crc kubenswrapper[4923]: I1128 11:31:56.980712 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5b856c5697-rqrzr" podUID="7459bffc-943f-4bb7-a293-952b538a7b5e" containerName="dnsmasq-dns" containerID="cri-o://91a1f6464728cbaca6a6d511a696a96f4bbf009d6e3e860f30ef9e55cba6f58e" gracePeriod=10 Nov 28 11:31:57 crc kubenswrapper[4923]: I1128 11:31:57.127291 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-79794c8ddf-8zvpb"] Nov 28 11:31:57 crc kubenswrapper[4923]: I1128 11:31:57.130370 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-79794c8ddf-8zvpb" Nov 28 11:31:57 crc kubenswrapper[4923]: I1128 11:31:57.157351 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-79794c8ddf-8zvpb"] Nov 28 11:31:57 crc kubenswrapper[4923]: I1128 11:31:57.294916 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a3965665-ea65-453f-8139-d611cbbc3833-ovsdbserver-nb\") pod \"dnsmasq-dns-79794c8ddf-8zvpb\" (UID: \"a3965665-ea65-453f-8139-d611cbbc3833\") " pod="openstack/dnsmasq-dns-79794c8ddf-8zvpb" Nov 28 11:31:57 crc kubenswrapper[4923]: I1128 11:31:57.295240 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a3965665-ea65-453f-8139-d611cbbc3833-config\") pod \"dnsmasq-dns-79794c8ddf-8zvpb\" (UID: \"a3965665-ea65-453f-8139-d611cbbc3833\") " pod="openstack/dnsmasq-dns-79794c8ddf-8zvpb" Nov 28 11:31:57 crc kubenswrapper[4923]: I1128 11:31:57.295270 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a3965665-ea65-453f-8139-d611cbbc3833-ovsdbserver-sb\") pod \"dnsmasq-dns-79794c8ddf-8zvpb\" (UID: \"a3965665-ea65-453f-8139-d611cbbc3833\") " pod="openstack/dnsmasq-dns-79794c8ddf-8zvpb" Nov 28 11:31:57 crc kubenswrapper[4923]: I1128 11:31:57.296828 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-47nrl\" (UniqueName: \"kubernetes.io/projected/a3965665-ea65-453f-8139-d611cbbc3833-kube-api-access-47nrl\") pod \"dnsmasq-dns-79794c8ddf-8zvpb\" (UID: \"a3965665-ea65-453f-8139-d611cbbc3833\") " pod="openstack/dnsmasq-dns-79794c8ddf-8zvpb" Nov 28 11:31:57 crc kubenswrapper[4923]: I1128 11:31:57.296888 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a3965665-ea65-453f-8139-d611cbbc3833-dns-svc\") pod \"dnsmasq-dns-79794c8ddf-8zvpb\" (UID: \"a3965665-ea65-453f-8139-d611cbbc3833\") " pod="openstack/dnsmasq-dns-79794c8ddf-8zvpb" Nov 28 11:31:57 crc kubenswrapper[4923]: I1128 11:31:57.296904 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/a3965665-ea65-453f-8139-d611cbbc3833-openstack-edpm-ipam\") pod \"dnsmasq-dns-79794c8ddf-8zvpb\" (UID: \"a3965665-ea65-453f-8139-d611cbbc3833\") " pod="openstack/dnsmasq-dns-79794c8ddf-8zvpb" Nov 28 11:31:57 crc kubenswrapper[4923]: I1128 11:31:57.398977 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-47nrl\" (UniqueName: 
\"kubernetes.io/projected/a3965665-ea65-453f-8139-d611cbbc3833-kube-api-access-47nrl\") pod \"dnsmasq-dns-79794c8ddf-8zvpb\" (UID: \"a3965665-ea65-453f-8139-d611cbbc3833\") " pod="openstack/dnsmasq-dns-79794c8ddf-8zvpb" Nov 28 11:31:57 crc kubenswrapper[4923]: I1128 11:31:57.399024 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a3965665-ea65-453f-8139-d611cbbc3833-dns-svc\") pod \"dnsmasq-dns-79794c8ddf-8zvpb\" (UID: \"a3965665-ea65-453f-8139-d611cbbc3833\") " pod="openstack/dnsmasq-dns-79794c8ddf-8zvpb" Nov 28 11:31:57 crc kubenswrapper[4923]: I1128 11:31:57.399044 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/a3965665-ea65-453f-8139-d611cbbc3833-openstack-edpm-ipam\") pod \"dnsmasq-dns-79794c8ddf-8zvpb\" (UID: \"a3965665-ea65-453f-8139-d611cbbc3833\") " pod="openstack/dnsmasq-dns-79794c8ddf-8zvpb" Nov 28 11:31:57 crc kubenswrapper[4923]: I1128 11:31:57.399090 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a3965665-ea65-453f-8139-d611cbbc3833-ovsdbserver-nb\") pod \"dnsmasq-dns-79794c8ddf-8zvpb\" (UID: \"a3965665-ea65-453f-8139-d611cbbc3833\") " pod="openstack/dnsmasq-dns-79794c8ddf-8zvpb" Nov 28 11:31:57 crc kubenswrapper[4923]: I1128 11:31:57.399151 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a3965665-ea65-453f-8139-d611cbbc3833-config\") pod \"dnsmasq-dns-79794c8ddf-8zvpb\" (UID: \"a3965665-ea65-453f-8139-d611cbbc3833\") " pod="openstack/dnsmasq-dns-79794c8ddf-8zvpb" Nov 28 11:31:57 crc kubenswrapper[4923]: I1128 11:31:57.399176 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a3965665-ea65-453f-8139-d611cbbc3833-ovsdbserver-sb\") pod \"dnsmasq-dns-79794c8ddf-8zvpb\" (UID: \"a3965665-ea65-453f-8139-d611cbbc3833\") " pod="openstack/dnsmasq-dns-79794c8ddf-8zvpb" Nov 28 11:31:57 crc kubenswrapper[4923]: I1128 11:31:57.399977 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a3965665-ea65-453f-8139-d611cbbc3833-ovsdbserver-sb\") pod \"dnsmasq-dns-79794c8ddf-8zvpb\" (UID: \"a3965665-ea65-453f-8139-d611cbbc3833\") " pod="openstack/dnsmasq-dns-79794c8ddf-8zvpb" Nov 28 11:31:57 crc kubenswrapper[4923]: I1128 11:31:57.400196 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a3965665-ea65-453f-8139-d611cbbc3833-dns-svc\") pod \"dnsmasq-dns-79794c8ddf-8zvpb\" (UID: \"a3965665-ea65-453f-8139-d611cbbc3833\") " pod="openstack/dnsmasq-dns-79794c8ddf-8zvpb" Nov 28 11:31:57 crc kubenswrapper[4923]: I1128 11:31:57.400512 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a3965665-ea65-453f-8139-d611cbbc3833-ovsdbserver-nb\") pod \"dnsmasq-dns-79794c8ddf-8zvpb\" (UID: \"a3965665-ea65-453f-8139-d611cbbc3833\") " pod="openstack/dnsmasq-dns-79794c8ddf-8zvpb" Nov 28 11:31:57 crc kubenswrapper[4923]: I1128 11:31:57.400720 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a3965665-ea65-453f-8139-d611cbbc3833-config\") pod \"dnsmasq-dns-79794c8ddf-8zvpb\" (UID: 
\"a3965665-ea65-453f-8139-d611cbbc3833\") " pod="openstack/dnsmasq-dns-79794c8ddf-8zvpb" Nov 28 11:31:57 crc kubenswrapper[4923]: I1128 11:31:57.401129 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/a3965665-ea65-453f-8139-d611cbbc3833-openstack-edpm-ipam\") pod \"dnsmasq-dns-79794c8ddf-8zvpb\" (UID: \"a3965665-ea65-453f-8139-d611cbbc3833\") " pod="openstack/dnsmasq-dns-79794c8ddf-8zvpb" Nov 28 11:31:57 crc kubenswrapper[4923]: I1128 11:31:57.415969 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-47nrl\" (UniqueName: \"kubernetes.io/projected/a3965665-ea65-453f-8139-d611cbbc3833-kube-api-access-47nrl\") pod \"dnsmasq-dns-79794c8ddf-8zvpb\" (UID: \"a3965665-ea65-453f-8139-d611cbbc3833\") " pod="openstack/dnsmasq-dns-79794c8ddf-8zvpb" Nov 28 11:31:57 crc kubenswrapper[4923]: I1128 11:31:57.459775 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-79794c8ddf-8zvpb" Nov 28 11:31:57 crc kubenswrapper[4923]: I1128 11:31:57.473305 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5b856c5697-rqrzr" Nov 28 11:31:57 crc kubenswrapper[4923]: I1128 11:31:57.596526 4923 generic.go:334] "Generic (PLEG): container finished" podID="7459bffc-943f-4bb7-a293-952b538a7b5e" containerID="91a1f6464728cbaca6a6d511a696a96f4bbf009d6e3e860f30ef9e55cba6f58e" exitCode=0 Nov 28 11:31:57 crc kubenswrapper[4923]: I1128 11:31:57.596561 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b856c5697-rqrzr" event={"ID":"7459bffc-943f-4bb7-a293-952b538a7b5e","Type":"ContainerDied","Data":"91a1f6464728cbaca6a6d511a696a96f4bbf009d6e3e860f30ef9e55cba6f58e"} Nov 28 11:31:57 crc kubenswrapper[4923]: I1128 11:31:57.596585 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b856c5697-rqrzr" event={"ID":"7459bffc-943f-4bb7-a293-952b538a7b5e","Type":"ContainerDied","Data":"e3c91733f7e719e7531206875aa7bc5715a7f6a352c689835e1bfd34ff525aa8"} Nov 28 11:31:57 crc kubenswrapper[4923]: I1128 11:31:57.596601 4923 scope.go:117] "RemoveContainer" containerID="91a1f6464728cbaca6a6d511a696a96f4bbf009d6e3e860f30ef9e55cba6f58e" Nov 28 11:31:57 crc kubenswrapper[4923]: I1128 11:31:57.596720 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5b856c5697-rqrzr" Nov 28 11:31:57 crc kubenswrapper[4923]: I1128 11:31:57.610883 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7459bffc-943f-4bb7-a293-952b538a7b5e-ovsdbserver-nb\") pod \"7459bffc-943f-4bb7-a293-952b538a7b5e\" (UID: \"7459bffc-943f-4bb7-a293-952b538a7b5e\") " Nov 28 11:31:57 crc kubenswrapper[4923]: I1128 11:31:57.610915 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7459bffc-943f-4bb7-a293-952b538a7b5e-dns-svc\") pod \"7459bffc-943f-4bb7-a293-952b538a7b5e\" (UID: \"7459bffc-943f-4bb7-a293-952b538a7b5e\") " Nov 28 11:31:57 crc kubenswrapper[4923]: I1128 11:31:57.610953 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-87d6p\" (UniqueName: \"kubernetes.io/projected/7459bffc-943f-4bb7-a293-952b538a7b5e-kube-api-access-87d6p\") pod \"7459bffc-943f-4bb7-a293-952b538a7b5e\" (UID: \"7459bffc-943f-4bb7-a293-952b538a7b5e\") " Nov 28 11:31:57 crc kubenswrapper[4923]: I1128 11:31:57.610977 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7459bffc-943f-4bb7-a293-952b538a7b5e-config\") pod \"7459bffc-943f-4bb7-a293-952b538a7b5e\" (UID: \"7459bffc-943f-4bb7-a293-952b538a7b5e\") " Nov 28 11:31:57 crc kubenswrapper[4923]: I1128 11:31:57.611006 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7459bffc-943f-4bb7-a293-952b538a7b5e-ovsdbserver-sb\") pod \"7459bffc-943f-4bb7-a293-952b538a7b5e\" (UID: \"7459bffc-943f-4bb7-a293-952b538a7b5e\") " Nov 28 11:31:57 crc kubenswrapper[4923]: I1128 11:31:57.636109 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7459bffc-943f-4bb7-a293-952b538a7b5e-kube-api-access-87d6p" (OuterVolumeSpecName: "kube-api-access-87d6p") pod "7459bffc-943f-4bb7-a293-952b538a7b5e" (UID: "7459bffc-943f-4bb7-a293-952b538a7b5e"). InnerVolumeSpecName "kube-api-access-87d6p". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:31:57 crc kubenswrapper[4923]: I1128 11:31:57.649395 4923 scope.go:117] "RemoveContainer" containerID="212014ca59ea0c15cffe96fc3cd477a2721f81262fa09c7762665489316942fa" Nov 28 11:31:57 crc kubenswrapper[4923]: I1128 11:31:57.704591 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7459bffc-943f-4bb7-a293-952b538a7b5e-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "7459bffc-943f-4bb7-a293-952b538a7b5e" (UID: "7459bffc-943f-4bb7-a293-952b538a7b5e"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:31:57 crc kubenswrapper[4923]: I1128 11:31:57.706877 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7459bffc-943f-4bb7-a293-952b538a7b5e-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "7459bffc-943f-4bb7-a293-952b538a7b5e" (UID: "7459bffc-943f-4bb7-a293-952b538a7b5e"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:31:57 crc kubenswrapper[4923]: I1128 11:31:57.715963 4923 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7459bffc-943f-4bb7-a293-952b538a7b5e-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 11:31:57 crc kubenswrapper[4923]: I1128 11:31:57.715998 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-87d6p\" (UniqueName: \"kubernetes.io/projected/7459bffc-943f-4bb7-a293-952b538a7b5e-kube-api-access-87d6p\") on node \"crc\" DevicePath \"\"" Nov 28 11:31:57 crc kubenswrapper[4923]: I1128 11:31:57.716009 4923 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7459bffc-943f-4bb7-a293-952b538a7b5e-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 11:31:57 crc kubenswrapper[4923]: I1128 11:31:57.744205 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7459bffc-943f-4bb7-a293-952b538a7b5e-config" (OuterVolumeSpecName: "config") pod "7459bffc-943f-4bb7-a293-952b538a7b5e" (UID: "7459bffc-943f-4bb7-a293-952b538a7b5e"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:31:57 crc kubenswrapper[4923]: I1128 11:31:57.746820 4923 scope.go:117] "RemoveContainer" containerID="91a1f6464728cbaca6a6d511a696a96f4bbf009d6e3e860f30ef9e55cba6f58e" Nov 28 11:31:57 crc kubenswrapper[4923]: E1128 11:31:57.751035 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"91a1f6464728cbaca6a6d511a696a96f4bbf009d6e3e860f30ef9e55cba6f58e\": container with ID starting with 91a1f6464728cbaca6a6d511a696a96f4bbf009d6e3e860f30ef9e55cba6f58e not found: ID does not exist" containerID="91a1f6464728cbaca6a6d511a696a96f4bbf009d6e3e860f30ef9e55cba6f58e" Nov 28 11:31:57 crc kubenswrapper[4923]: I1128 11:31:57.751085 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"91a1f6464728cbaca6a6d511a696a96f4bbf009d6e3e860f30ef9e55cba6f58e"} err="failed to get container status \"91a1f6464728cbaca6a6d511a696a96f4bbf009d6e3e860f30ef9e55cba6f58e\": rpc error: code = NotFound desc = could not find container \"91a1f6464728cbaca6a6d511a696a96f4bbf009d6e3e860f30ef9e55cba6f58e\": container with ID starting with 91a1f6464728cbaca6a6d511a696a96f4bbf009d6e3e860f30ef9e55cba6f58e not found: ID does not exist" Nov 28 11:31:57 crc kubenswrapper[4923]: I1128 11:31:57.751114 4923 scope.go:117] "RemoveContainer" containerID="212014ca59ea0c15cffe96fc3cd477a2721f81262fa09c7762665489316942fa" Nov 28 11:31:57 crc kubenswrapper[4923]: E1128 11:31:57.751545 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"212014ca59ea0c15cffe96fc3cd477a2721f81262fa09c7762665489316942fa\": container with ID starting with 212014ca59ea0c15cffe96fc3cd477a2721f81262fa09c7762665489316942fa not found: ID does not exist" containerID="212014ca59ea0c15cffe96fc3cd477a2721f81262fa09c7762665489316942fa" Nov 28 11:31:57 crc kubenswrapper[4923]: I1128 11:31:57.751594 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"212014ca59ea0c15cffe96fc3cd477a2721f81262fa09c7762665489316942fa"} err="failed to get container status \"212014ca59ea0c15cffe96fc3cd477a2721f81262fa09c7762665489316942fa\": rpc error: code = NotFound desc = could not find container 
\"212014ca59ea0c15cffe96fc3cd477a2721f81262fa09c7762665489316942fa\": container with ID starting with 212014ca59ea0c15cffe96fc3cd477a2721f81262fa09c7762665489316942fa not found: ID does not exist" Nov 28 11:31:57 crc kubenswrapper[4923]: I1128 11:31:57.758509 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7459bffc-943f-4bb7-a293-952b538a7b5e-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "7459bffc-943f-4bb7-a293-952b538a7b5e" (UID: "7459bffc-943f-4bb7-a293-952b538a7b5e"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:31:57 crc kubenswrapper[4923]: I1128 11:31:57.819395 4923 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7459bffc-943f-4bb7-a293-952b538a7b5e-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 11:31:57 crc kubenswrapper[4923]: I1128 11:31:57.819442 4923 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7459bffc-943f-4bb7-a293-952b538a7b5e-config\") on node \"crc\" DevicePath \"\"" Nov 28 11:31:57 crc kubenswrapper[4923]: I1128 11:31:57.929078 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5b856c5697-rqrzr"] Nov 28 11:31:57 crc kubenswrapper[4923]: I1128 11:31:57.935811 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5b856c5697-rqrzr"] Nov 28 11:31:57 crc kubenswrapper[4923]: I1128 11:31:57.959495 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-79794c8ddf-8zvpb"] Nov 28 11:31:57 crc kubenswrapper[4923]: W1128 11:31:57.962476 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda3965665_ea65_453f_8139_d611cbbc3833.slice/crio-55a9b4dd37acdb9b86885668a7cc6dfd797ed183767bd1da559064a7cfe84d3f WatchSource:0}: Error finding container 55a9b4dd37acdb9b86885668a7cc6dfd797ed183767bd1da559064a7cfe84d3f: Status 404 returned error can't find the container with id 55a9b4dd37acdb9b86885668a7cc6dfd797ed183767bd1da559064a7cfe84d3f Nov 28 11:31:58 crc kubenswrapper[4923]: I1128 11:31:58.607048 4923 generic.go:334] "Generic (PLEG): container finished" podID="a3965665-ea65-453f-8139-d611cbbc3833" containerID="877c125c36c73d7d93275388a49be9fcb68407977a12185bf75951a4311b0753" exitCode=0 Nov 28 11:31:58 crc kubenswrapper[4923]: I1128 11:31:58.607335 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79794c8ddf-8zvpb" event={"ID":"a3965665-ea65-453f-8139-d611cbbc3833","Type":"ContainerDied","Data":"877c125c36c73d7d93275388a49be9fcb68407977a12185bf75951a4311b0753"} Nov 28 11:31:58 crc kubenswrapper[4923]: I1128 11:31:58.607516 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79794c8ddf-8zvpb" event={"ID":"a3965665-ea65-453f-8139-d611cbbc3833","Type":"ContainerStarted","Data":"55a9b4dd37acdb9b86885668a7cc6dfd797ed183767bd1da559064a7cfe84d3f"} Nov 28 11:31:59 crc kubenswrapper[4923]: I1128 11:31:59.192915 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7459bffc-943f-4bb7-a293-952b538a7b5e" path="/var/lib/kubelet/pods/7459bffc-943f-4bb7-a293-952b538a7b5e/volumes" Nov 28 11:31:59 crc kubenswrapper[4923]: I1128 11:31:59.625098 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79794c8ddf-8zvpb" 
event={"ID":"a3965665-ea65-453f-8139-d611cbbc3833","Type":"ContainerStarted","Data":"93446291d930005d48f793db1fad796a745f6f2c32b8f11a9ad51ca8e15990f1"} Nov 28 11:31:59 crc kubenswrapper[4923]: I1128 11:31:59.626257 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-79794c8ddf-8zvpb" Nov 28 11:31:59 crc kubenswrapper[4923]: I1128 11:31:59.654504 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-79794c8ddf-8zvpb" podStartSLOduration=2.654487659 podStartE2EDuration="2.654487659s" podCreationTimestamp="2025-11-28 11:31:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:31:59.653051279 +0000 UTC m=+1398.781735539" watchObservedRunningTime="2025-11-28 11:31:59.654487659 +0000 UTC m=+1398.783171879" Nov 28 11:32:07 crc kubenswrapper[4923]: I1128 11:32:07.462282 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-79794c8ddf-8zvpb" Nov 28 11:32:07 crc kubenswrapper[4923]: I1128 11:32:07.581317 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6447ccbd8f-bqzmr"] Nov 28 11:32:07 crc kubenswrapper[4923]: I1128 11:32:07.581573 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6447ccbd8f-bqzmr" podUID="9c056b3a-4b2a-4da3-bfc9-5d6812f5283f" containerName="dnsmasq-dns" containerID="cri-o://f8204e27252577219d4330cc2bc1ba567c27229859ccb8876b83271e5588e19b" gracePeriod=10 Nov 28 11:32:08 crc kubenswrapper[4923]: I1128 11:32:08.047593 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6447ccbd8f-bqzmr" Nov 28 11:32:08 crc kubenswrapper[4923]: I1128 11:32:08.235551 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9c056b3a-4b2a-4da3-bfc9-5d6812f5283f-ovsdbserver-sb\") pod \"9c056b3a-4b2a-4da3-bfc9-5d6812f5283f\" (UID: \"9c056b3a-4b2a-4da3-bfc9-5d6812f5283f\") " Nov 28 11:32:08 crc kubenswrapper[4923]: I1128 11:32:08.235997 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w2gvr\" (UniqueName: \"kubernetes.io/projected/9c056b3a-4b2a-4da3-bfc9-5d6812f5283f-kube-api-access-w2gvr\") pod \"9c056b3a-4b2a-4da3-bfc9-5d6812f5283f\" (UID: \"9c056b3a-4b2a-4da3-bfc9-5d6812f5283f\") " Nov 28 11:32:08 crc kubenswrapper[4923]: I1128 11:32:08.236028 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/9c056b3a-4b2a-4da3-bfc9-5d6812f5283f-openstack-edpm-ipam\") pod \"9c056b3a-4b2a-4da3-bfc9-5d6812f5283f\" (UID: \"9c056b3a-4b2a-4da3-bfc9-5d6812f5283f\") " Nov 28 11:32:08 crc kubenswrapper[4923]: I1128 11:32:08.236096 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9c056b3a-4b2a-4da3-bfc9-5d6812f5283f-config\") pod \"9c056b3a-4b2a-4da3-bfc9-5d6812f5283f\" (UID: \"9c056b3a-4b2a-4da3-bfc9-5d6812f5283f\") " Nov 28 11:32:08 crc kubenswrapper[4923]: I1128 11:32:08.236146 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9c056b3a-4b2a-4da3-bfc9-5d6812f5283f-ovsdbserver-nb\") pod \"9c056b3a-4b2a-4da3-bfc9-5d6812f5283f\" (UID: 
\"9c056b3a-4b2a-4da3-bfc9-5d6812f5283f\") " Nov 28 11:32:08 crc kubenswrapper[4923]: I1128 11:32:08.236178 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9c056b3a-4b2a-4da3-bfc9-5d6812f5283f-dns-svc\") pod \"9c056b3a-4b2a-4da3-bfc9-5d6812f5283f\" (UID: \"9c056b3a-4b2a-4da3-bfc9-5d6812f5283f\") " Nov 28 11:32:08 crc kubenswrapper[4923]: I1128 11:32:08.241725 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9c056b3a-4b2a-4da3-bfc9-5d6812f5283f-kube-api-access-w2gvr" (OuterVolumeSpecName: "kube-api-access-w2gvr") pod "9c056b3a-4b2a-4da3-bfc9-5d6812f5283f" (UID: "9c056b3a-4b2a-4da3-bfc9-5d6812f5283f"). InnerVolumeSpecName "kube-api-access-w2gvr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:32:08 crc kubenswrapper[4923]: I1128 11:32:08.284083 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9c056b3a-4b2a-4da3-bfc9-5d6812f5283f-config" (OuterVolumeSpecName: "config") pod "9c056b3a-4b2a-4da3-bfc9-5d6812f5283f" (UID: "9c056b3a-4b2a-4da3-bfc9-5d6812f5283f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:32:08 crc kubenswrapper[4923]: I1128 11:32:08.285192 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9c056b3a-4b2a-4da3-bfc9-5d6812f5283f-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "9c056b3a-4b2a-4da3-bfc9-5d6812f5283f" (UID: "9c056b3a-4b2a-4da3-bfc9-5d6812f5283f"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:32:08 crc kubenswrapper[4923]: I1128 11:32:08.293380 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9c056b3a-4b2a-4da3-bfc9-5d6812f5283f-openstack-edpm-ipam" (OuterVolumeSpecName: "openstack-edpm-ipam") pod "9c056b3a-4b2a-4da3-bfc9-5d6812f5283f" (UID: "9c056b3a-4b2a-4da3-bfc9-5d6812f5283f"). InnerVolumeSpecName "openstack-edpm-ipam". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:32:08 crc kubenswrapper[4923]: I1128 11:32:08.309415 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9c056b3a-4b2a-4da3-bfc9-5d6812f5283f-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "9c056b3a-4b2a-4da3-bfc9-5d6812f5283f" (UID: "9c056b3a-4b2a-4da3-bfc9-5d6812f5283f"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:32:08 crc kubenswrapper[4923]: I1128 11:32:08.315336 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9c056b3a-4b2a-4da3-bfc9-5d6812f5283f-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "9c056b3a-4b2a-4da3-bfc9-5d6812f5283f" (UID: "9c056b3a-4b2a-4da3-bfc9-5d6812f5283f"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:32:08 crc kubenswrapper[4923]: I1128 11:32:08.340308 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w2gvr\" (UniqueName: \"kubernetes.io/projected/9c056b3a-4b2a-4da3-bfc9-5d6812f5283f-kube-api-access-w2gvr\") on node \"crc\" DevicePath \"\"" Nov 28 11:32:08 crc kubenswrapper[4923]: I1128 11:32:08.340346 4923 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/9c056b3a-4b2a-4da3-bfc9-5d6812f5283f-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Nov 28 11:32:08 crc kubenswrapper[4923]: I1128 11:32:08.340359 4923 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9c056b3a-4b2a-4da3-bfc9-5d6812f5283f-config\") on node \"crc\" DevicePath \"\"" Nov 28 11:32:08 crc kubenswrapper[4923]: I1128 11:32:08.340370 4923 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9c056b3a-4b2a-4da3-bfc9-5d6812f5283f-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 11:32:08 crc kubenswrapper[4923]: I1128 11:32:08.340381 4923 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9c056b3a-4b2a-4da3-bfc9-5d6812f5283f-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 11:32:08 crc kubenswrapper[4923]: I1128 11:32:08.340390 4923 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9c056b3a-4b2a-4da3-bfc9-5d6812f5283f-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 11:32:08 crc kubenswrapper[4923]: I1128 11:32:08.714291 4923 generic.go:334] "Generic (PLEG): container finished" podID="9c056b3a-4b2a-4da3-bfc9-5d6812f5283f" containerID="f8204e27252577219d4330cc2bc1ba567c27229859ccb8876b83271e5588e19b" exitCode=0 Nov 28 11:32:08 crc kubenswrapper[4923]: I1128 11:32:08.714354 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6447ccbd8f-bqzmr" event={"ID":"9c056b3a-4b2a-4da3-bfc9-5d6812f5283f","Type":"ContainerDied","Data":"f8204e27252577219d4330cc2bc1ba567c27229859ccb8876b83271e5588e19b"} Nov 28 11:32:08 crc kubenswrapper[4923]: I1128 11:32:08.714409 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6447ccbd8f-bqzmr" event={"ID":"9c056b3a-4b2a-4da3-bfc9-5d6812f5283f","Type":"ContainerDied","Data":"9c5b4900d97b3357ec128349b1ae99e2f0d4801eac5c2835136fa17d6eda11ab"} Nov 28 11:32:08 crc kubenswrapper[4923]: I1128 11:32:08.714432 4923 scope.go:117] "RemoveContainer" containerID="f8204e27252577219d4330cc2bc1ba567c27229859ccb8876b83271e5588e19b" Nov 28 11:32:08 crc kubenswrapper[4923]: I1128 11:32:08.714351 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6447ccbd8f-bqzmr" Nov 28 11:32:08 crc kubenswrapper[4923]: I1128 11:32:08.747303 4923 scope.go:117] "RemoveContainer" containerID="9206d98a152b173452119b7bea70e7cb0ff3cdb8f47892bd2edf2556cf393ed8" Nov 28 11:32:08 crc kubenswrapper[4923]: I1128 11:32:08.789669 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6447ccbd8f-bqzmr"] Nov 28 11:32:08 crc kubenswrapper[4923]: I1128 11:32:08.789750 4923 scope.go:117] "RemoveContainer" containerID="f8204e27252577219d4330cc2bc1ba567c27229859ccb8876b83271e5588e19b" Nov 28 11:32:08 crc kubenswrapper[4923]: E1128 11:32:08.790607 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f8204e27252577219d4330cc2bc1ba567c27229859ccb8876b83271e5588e19b\": container with ID starting with f8204e27252577219d4330cc2bc1ba567c27229859ccb8876b83271e5588e19b not found: ID does not exist" containerID="f8204e27252577219d4330cc2bc1ba567c27229859ccb8876b83271e5588e19b" Nov 28 11:32:08 crc kubenswrapper[4923]: I1128 11:32:08.790650 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f8204e27252577219d4330cc2bc1ba567c27229859ccb8876b83271e5588e19b"} err="failed to get container status \"f8204e27252577219d4330cc2bc1ba567c27229859ccb8876b83271e5588e19b\": rpc error: code = NotFound desc = could not find container \"f8204e27252577219d4330cc2bc1ba567c27229859ccb8876b83271e5588e19b\": container with ID starting with f8204e27252577219d4330cc2bc1ba567c27229859ccb8876b83271e5588e19b not found: ID does not exist" Nov 28 11:32:08 crc kubenswrapper[4923]: I1128 11:32:08.790675 4923 scope.go:117] "RemoveContainer" containerID="9206d98a152b173452119b7bea70e7cb0ff3cdb8f47892bd2edf2556cf393ed8" Nov 28 11:32:08 crc kubenswrapper[4923]: E1128 11:32:08.791084 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9206d98a152b173452119b7bea70e7cb0ff3cdb8f47892bd2edf2556cf393ed8\": container with ID starting with 9206d98a152b173452119b7bea70e7cb0ff3cdb8f47892bd2edf2556cf393ed8 not found: ID does not exist" containerID="9206d98a152b173452119b7bea70e7cb0ff3cdb8f47892bd2edf2556cf393ed8" Nov 28 11:32:08 crc kubenswrapper[4923]: I1128 11:32:08.791110 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9206d98a152b173452119b7bea70e7cb0ff3cdb8f47892bd2edf2556cf393ed8"} err="failed to get container status \"9206d98a152b173452119b7bea70e7cb0ff3cdb8f47892bd2edf2556cf393ed8\": rpc error: code = NotFound desc = could not find container \"9206d98a152b173452119b7bea70e7cb0ff3cdb8f47892bd2edf2556cf393ed8\": container with ID starting with 9206d98a152b173452119b7bea70e7cb0ff3cdb8f47892bd2edf2556cf393ed8 not found: ID does not exist" Nov 28 11:32:08 crc kubenswrapper[4923]: I1128 11:32:08.796743 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6447ccbd8f-bqzmr"] Nov 28 11:32:09 crc kubenswrapper[4923]: I1128 11:32:09.181524 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9c056b3a-4b2a-4da3-bfc9-5d6812f5283f" path="/var/lib/kubelet/pods/9c056b3a-4b2a-4da3-bfc9-5d6812f5283f/volumes" Nov 28 11:32:13 crc kubenswrapper[4923]: I1128 11:32:13.769539 4923 generic.go:334] "Generic (PLEG): container finished" podID="d4b9d25a-9809-4c97-a1dd-37d779b158cf" containerID="7dff06c4c961f84b1460d76aa8bb41ad2e0bf5290103a6445a562e76ea605613" 
Nov 28 11:32:13 crc kubenswrapper[4923]: I1128 11:32:13.769539 4923 generic.go:334] "Generic (PLEG): container finished" podID="d4b9d25a-9809-4c97-a1dd-37d779b158cf" containerID="7dff06c4c961f84b1460d76aa8bb41ad2e0bf5290103a6445a562e76ea605613" exitCode=0
Nov 28 11:32:13 crc kubenswrapper[4923]: I1128 11:32:13.769602 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"d4b9d25a-9809-4c97-a1dd-37d779b158cf","Type":"ContainerDied","Data":"7dff06c4c961f84b1460d76aa8bb41ad2e0bf5290103a6445a562e76ea605613"}
Nov 28 11:32:14 crc kubenswrapper[4923]: I1128 11:32:14.783884 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"d4b9d25a-9809-4c97-a1dd-37d779b158cf","Type":"ContainerStarted","Data":"ced50500043a6adb15cf97bb39afcb0776f39743050cc0f8b23404472f283281"}
Nov 28 11:32:14 crc kubenswrapper[4923]: I1128 11:32:14.784509 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0"
Nov 28 11:32:14 crc kubenswrapper[4923]: I1128 11:32:14.826191 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=36.826158639 podStartE2EDuration="36.826158639s" podCreationTimestamp="2025-11-28 11:31:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:32:14.819789451 +0000 UTC m=+1413.948473661" watchObservedRunningTime="2025-11-28 11:32:14.826158639 +0000 UTC m=+1413.954842859"
Nov 28 11:32:15 crc kubenswrapper[4923]: I1128 11:32:15.796222 4923 generic.go:334] "Generic (PLEG): container finished" podID="66114922-3d2e-40e1-9d35-84b0960ea5a2" containerID="1feca1c933fef747281644fff150beb21f017deb7ec2623f90cf82e58cbe2c36" exitCode=0
Nov 28 11:32:15 crc kubenswrapper[4923]: I1128 11:32:15.796348 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"66114922-3d2e-40e1-9d35-84b0960ea5a2","Type":"ContainerDied","Data":"1feca1c933fef747281644fff150beb21f017deb7ec2623f90cf82e58cbe2c36"}
Nov 28 11:32:16 crc kubenswrapper[4923]: I1128 11:32:16.805914 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"66114922-3d2e-40e1-9d35-84b0960ea5a2","Type":"ContainerStarted","Data":"e482ca48ca2c618e4dd74d046bd3aae45a2be9d41c6a7db750f03eacdab1c3a9"}
Nov 28 11:32:16 crc kubenswrapper[4923]: I1128 11:32:16.806434 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0"
Nov 28 11:32:16 crc kubenswrapper[4923]: I1128 11:32:16.827306 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=37.82729322 podStartE2EDuration="37.82729322s" podCreationTimestamp="2025-11-28 11:31:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:32:16.823560526 +0000 UTC m=+1415.952244726" watchObservedRunningTime="2025-11-28 11:32:16.82729322 +0000 UTC m=+1415.955977430"
Nov 28 11:32:17 crc kubenswrapper[4923]: I1128 11:32:17.768524 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-ddcpw"]
Nov 28 11:32:17 crc kubenswrapper[4923]: E1128 11:32:17.768949 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9c056b3a-4b2a-4da3-bfc9-5d6812f5283f" containerName="init"
Nov 28 11:32:17 crc kubenswrapper[4923]: I1128 11:32:17.768960 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="9c056b3a-4b2a-4da3-bfc9-5d6812f5283f" containerName="init"
Nov 28 11:32:17 crc kubenswrapper[4923]: 
E1128 11:32:17.768976 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7459bffc-943f-4bb7-a293-952b538a7b5e" containerName="dnsmasq-dns" Nov 28 11:32:17 crc kubenswrapper[4923]: I1128 11:32:17.768982 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="7459bffc-943f-4bb7-a293-952b538a7b5e" containerName="dnsmasq-dns" Nov 28 11:32:17 crc kubenswrapper[4923]: E1128 11:32:17.769002 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7459bffc-943f-4bb7-a293-952b538a7b5e" containerName="init" Nov 28 11:32:17 crc kubenswrapper[4923]: I1128 11:32:17.769008 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="7459bffc-943f-4bb7-a293-952b538a7b5e" containerName="init" Nov 28 11:32:17 crc kubenswrapper[4923]: E1128 11:32:17.769024 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9c056b3a-4b2a-4da3-bfc9-5d6812f5283f" containerName="dnsmasq-dns" Nov 28 11:32:17 crc kubenswrapper[4923]: I1128 11:32:17.769031 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="9c056b3a-4b2a-4da3-bfc9-5d6812f5283f" containerName="dnsmasq-dns" Nov 28 11:32:17 crc kubenswrapper[4923]: I1128 11:32:17.769190 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="7459bffc-943f-4bb7-a293-952b538a7b5e" containerName="dnsmasq-dns" Nov 28 11:32:17 crc kubenswrapper[4923]: I1128 11:32:17.769211 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="9c056b3a-4b2a-4da3-bfc9-5d6812f5283f" containerName="dnsmasq-dns" Nov 28 11:32:17 crc kubenswrapper[4923]: I1128 11:32:17.769742 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-ddcpw" Nov 28 11:32:17 crc kubenswrapper[4923]: I1128 11:32:17.772720 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 28 11:32:17 crc kubenswrapper[4923]: I1128 11:32:17.772816 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 28 11:32:17 crc kubenswrapper[4923]: I1128 11:32:17.773020 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 11:32:17 crc kubenswrapper[4923]: I1128 11:32:17.777893 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-2xnkl" Nov 28 11:32:17 crc kubenswrapper[4923]: I1128 11:32:17.787182 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-ddcpw"] Nov 28 11:32:17 crc kubenswrapper[4923]: I1128 11:32:17.892687 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8298cf34-2702-46ec-a4e0-002988266a81-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-ddcpw\" (UID: \"8298cf34-2702-46ec-a4e0-002988266a81\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-ddcpw" Nov 28 11:32:17 crc kubenswrapper[4923]: I1128 11:32:17.892759 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8298cf34-2702-46ec-a4e0-002988266a81-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-ddcpw\" (UID: \"8298cf34-2702-46ec-a4e0-002988266a81\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-ddcpw" Nov 28 11:32:17 crc kubenswrapper[4923]: I1128 
11:32:17.892836 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8298cf34-2702-46ec-a4e0-002988266a81-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-ddcpw\" (UID: \"8298cf34-2702-46ec-a4e0-002988266a81\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-ddcpw" Nov 28 11:32:17 crc kubenswrapper[4923]: I1128 11:32:17.892870 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gt6km\" (UniqueName: \"kubernetes.io/projected/8298cf34-2702-46ec-a4e0-002988266a81-kube-api-access-gt6km\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-ddcpw\" (UID: \"8298cf34-2702-46ec-a4e0-002988266a81\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-ddcpw" Nov 28 11:32:17 crc kubenswrapper[4923]: I1128 11:32:17.995108 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8298cf34-2702-46ec-a4e0-002988266a81-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-ddcpw\" (UID: \"8298cf34-2702-46ec-a4e0-002988266a81\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-ddcpw" Nov 28 11:32:17 crc kubenswrapper[4923]: I1128 11:32:17.995172 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8298cf34-2702-46ec-a4e0-002988266a81-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-ddcpw\" (UID: \"8298cf34-2702-46ec-a4e0-002988266a81\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-ddcpw" Nov 28 11:32:17 crc kubenswrapper[4923]: I1128 11:32:17.995243 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8298cf34-2702-46ec-a4e0-002988266a81-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-ddcpw\" (UID: \"8298cf34-2702-46ec-a4e0-002988266a81\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-ddcpw" Nov 28 11:32:17 crc kubenswrapper[4923]: I1128 11:32:17.995278 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gt6km\" (UniqueName: \"kubernetes.io/projected/8298cf34-2702-46ec-a4e0-002988266a81-kube-api-access-gt6km\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-ddcpw\" (UID: \"8298cf34-2702-46ec-a4e0-002988266a81\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-ddcpw" Nov 28 11:32:18 crc kubenswrapper[4923]: I1128 11:32:18.002464 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8298cf34-2702-46ec-a4e0-002988266a81-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-ddcpw\" (UID: \"8298cf34-2702-46ec-a4e0-002988266a81\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-ddcpw" Nov 28 11:32:18 crc kubenswrapper[4923]: I1128 11:32:18.002463 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8298cf34-2702-46ec-a4e0-002988266a81-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-ddcpw\" (UID: \"8298cf34-2702-46ec-a4e0-002988266a81\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-ddcpw" Nov 28 11:32:18 crc 
kubenswrapper[4923]: I1128 11:32:18.003129 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8298cf34-2702-46ec-a4e0-002988266a81-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-ddcpw\" (UID: \"8298cf34-2702-46ec-a4e0-002988266a81\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-ddcpw"
Nov 28 11:32:18 crc kubenswrapper[4923]: I1128 11:32:18.024841 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gt6km\" (UniqueName: \"kubernetes.io/projected/8298cf34-2702-46ec-a4e0-002988266a81-kube-api-access-gt6km\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-ddcpw\" (UID: \"8298cf34-2702-46ec-a4e0-002988266a81\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-ddcpw"
Nov 28 11:32:18 crc kubenswrapper[4923]: I1128 11:32:18.089704 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-ddcpw"
Nov 28 11:32:18 crc kubenswrapper[4923]: I1128 11:32:18.791697 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-ddcpw"]
Nov 28 11:32:18 crc kubenswrapper[4923]: I1128 11:32:18.830647 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-ddcpw" event={"ID":"8298cf34-2702-46ec-a4e0-002988266a81","Type":"ContainerStarted","Data":"a5f1ca9069a9dce5e697e5495256d7b58805edf277096af15a3d781f90ccbbc0"}
Nov 28 11:32:28 crc kubenswrapper[4923]: I1128 11:32:28.802709 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0"
Nov 28 11:32:28 crc kubenswrapper[4923]: I1128 11:32:28.930365 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-ddcpw" event={"ID":"8298cf34-2702-46ec-a4e0-002988266a81","Type":"ContainerStarted","Data":"f8c699fc4470857649000a7e806971ca1eebe9672dbf5fd180e666d99ffe2cec"}
Nov 28 11:32:28 crc kubenswrapper[4923]: I1128 11:32:28.954555 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-ddcpw" podStartSLOduration=2.118489757 podStartE2EDuration="11.95453724s" podCreationTimestamp="2025-11-28 11:32:17 +0000 UTC" firstStartedPulling="2025-11-28 11:32:18.803536666 +0000 UTC m=+1417.932220886" lastFinishedPulling="2025-11-28 11:32:28.639584149 +0000 UTC m=+1427.768268369" observedRunningTime="2025-11-28 11:32:28.947652385 +0000 UTC m=+1428.076336595" watchObservedRunningTime="2025-11-28 11:32:28.95453724 +0000 UTC m=+1428.083221450"
Nov 28 11:32:30 crc kubenswrapper[4923]: I1128 11:32:30.361140 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0"
Nov 28 11:32:40 crc kubenswrapper[4923]: I1128 11:32:40.053458 4923 generic.go:334] "Generic (PLEG): container finished" podID="8298cf34-2702-46ec-a4e0-002988266a81" containerID="f8c699fc4470857649000a7e806971ca1eebe9672dbf5fd180e666d99ffe2cec" exitCode=0
Nov 28 11:32:40 crc kubenswrapper[4923]: I1128 11:32:40.053601 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-ddcpw" event={"ID":"8298cf34-2702-46ec-a4e0-002988266a81","Type":"ContainerDied","Data":"f8c699fc4470857649000a7e806971ca1eebe9672dbf5fd180e666d99ffe2cec"}
Nov 28 11:32:41 crc kubenswrapper[4923]: I1128 11:32:41.495510 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-ddcpw"
Nov 28 11:32:41 crc kubenswrapper[4923]: I1128 11:32:41.588097 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gt6km\" (UniqueName: \"kubernetes.io/projected/8298cf34-2702-46ec-a4e0-002988266a81-kube-api-access-gt6km\") pod \"8298cf34-2702-46ec-a4e0-002988266a81\" (UID: \"8298cf34-2702-46ec-a4e0-002988266a81\") "
Nov 28 11:32:41 crc kubenswrapper[4923]: I1128 11:32:41.588159 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8298cf34-2702-46ec-a4e0-002988266a81-ssh-key\") pod \"8298cf34-2702-46ec-a4e0-002988266a81\" (UID: \"8298cf34-2702-46ec-a4e0-002988266a81\") "
Nov 28 11:32:41 crc kubenswrapper[4923]: I1128 11:32:41.588260 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8298cf34-2702-46ec-a4e0-002988266a81-inventory\") pod \"8298cf34-2702-46ec-a4e0-002988266a81\" (UID: \"8298cf34-2702-46ec-a4e0-002988266a81\") "
Nov 28 11:32:41 crc kubenswrapper[4923]: I1128 11:32:41.588374 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8298cf34-2702-46ec-a4e0-002988266a81-repo-setup-combined-ca-bundle\") pod \"8298cf34-2702-46ec-a4e0-002988266a81\" (UID: \"8298cf34-2702-46ec-a4e0-002988266a81\") "
Nov 28 11:32:41 crc kubenswrapper[4923]: I1128 11:32:41.594142 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8298cf34-2702-46ec-a4e0-002988266a81-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "8298cf34-2702-46ec-a4e0-002988266a81" (UID: "8298cf34-2702-46ec-a4e0-002988266a81"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 11:32:41 crc kubenswrapper[4923]: I1128 11:32:41.594182 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8298cf34-2702-46ec-a4e0-002988266a81-kube-api-access-gt6km" (OuterVolumeSpecName: "kube-api-access-gt6km") pod "8298cf34-2702-46ec-a4e0-002988266a81" (UID: "8298cf34-2702-46ec-a4e0-002988266a81"). InnerVolumeSpecName "kube-api-access-gt6km". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 11:32:41 crc kubenswrapper[4923]: I1128 11:32:41.618058 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8298cf34-2702-46ec-a4e0-002988266a81-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "8298cf34-2702-46ec-a4e0-002988266a81" (UID: "8298cf34-2702-46ec-a4e0-002988266a81"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 11:32:41 crc kubenswrapper[4923]: I1128 11:32:41.627428 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8298cf34-2702-46ec-a4e0-002988266a81-inventory" (OuterVolumeSpecName: "inventory") pod "8298cf34-2702-46ec-a4e0-002988266a81" (UID: "8298cf34-2702-46ec-a4e0-002988266a81"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 11:32:41 crc kubenswrapper[4923]: I1128 11:32:41.690766 4923 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8298cf34-2702-46ec-a4e0-002988266a81-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 11:32:41 crc kubenswrapper[4923]: I1128 11:32:41.690824 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gt6km\" (UniqueName: \"kubernetes.io/projected/8298cf34-2702-46ec-a4e0-002988266a81-kube-api-access-gt6km\") on node \"crc\" DevicePath \"\""
Nov 28 11:32:41 crc kubenswrapper[4923]: I1128 11:32:41.690844 4923 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8298cf34-2702-46ec-a4e0-002988266a81-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 28 11:32:41 crc kubenswrapper[4923]: I1128 11:32:41.690866 4923 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8298cf34-2702-46ec-a4e0-002988266a81-inventory\") on node \"crc\" DevicePath \"\""
Nov 28 11:32:42 crc kubenswrapper[4923]: I1128 11:32:42.088641 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-ddcpw" event={"ID":"8298cf34-2702-46ec-a4e0-002988266a81","Type":"ContainerDied","Data":"a5f1ca9069a9dce5e697e5495256d7b58805edf277096af15a3d781f90ccbbc0"}
Nov 28 11:32:42 crc kubenswrapper[4923]: I1128 11:32:42.088680 4923 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a5f1ca9069a9dce5e697e5495256d7b58805edf277096af15a3d781f90ccbbc0"
Nov 28 11:32:42 crc kubenswrapper[4923]: I1128 11:32:42.088737 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-ddcpw"
Nov 28 11:32:42 crc kubenswrapper[4923]: I1128 11:32:42.159448 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-pnzvk"]
Nov 28 11:32:42 crc kubenswrapper[4923]: E1128 11:32:42.159867 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8298cf34-2702-46ec-a4e0-002988266a81" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam"
Nov 28 11:32:42 crc kubenswrapper[4923]: I1128 11:32:42.159889 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="8298cf34-2702-46ec-a4e0-002988266a81" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam"
Nov 28 11:32:42 crc kubenswrapper[4923]: I1128 11:32:42.160166 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="8298cf34-2702-46ec-a4e0-002988266a81" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam"
Nov 28 11:32:42 crc kubenswrapper[4923]: I1128 11:32:42.160898 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-pnzvk"
Nov 28 11:32:42 crc kubenswrapper[4923]: I1128 11:32:42.163977 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Nov 28 11:32:42 crc kubenswrapper[4923]: I1128 11:32:42.164587 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 28 11:32:42 crc kubenswrapper[4923]: I1128 11:32:42.164875 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-2xnkl"
Nov 28 11:32:42 crc kubenswrapper[4923]: I1128 11:32:42.166541 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Nov 28 11:32:42 crc kubenswrapper[4923]: I1128 11:32:42.175752 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-pnzvk"]
Nov 28 11:32:42 crc kubenswrapper[4923]: I1128 11:32:42.199111 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff94fcb0-8bac-4b68-b732-c20bd131c50f-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-pnzvk\" (UID: \"ff94fcb0-8bac-4b68-b732-c20bd131c50f\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-pnzvk"
Nov 28 11:32:42 crc kubenswrapper[4923]: I1128 11:32:42.199174 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pbxcn\" (UniqueName: \"kubernetes.io/projected/ff94fcb0-8bac-4b68-b732-c20bd131c50f-kube-api-access-pbxcn\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-pnzvk\" (UID: \"ff94fcb0-8bac-4b68-b732-c20bd131c50f\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-pnzvk"
Nov 28 11:32:42 crc kubenswrapper[4923]: I1128 11:32:42.199273 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ff94fcb0-8bac-4b68-b732-c20bd131c50f-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-pnzvk\" (UID: \"ff94fcb0-8bac-4b68-b732-c20bd131c50f\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-pnzvk"
Nov 28 11:32:42 crc kubenswrapper[4923]: I1128 11:32:42.199293 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ff94fcb0-8bac-4b68-b732-c20bd131c50f-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-pnzvk\" (UID: \"ff94fcb0-8bac-4b68-b732-c20bd131c50f\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-pnzvk"
Nov 28 11:32:42 crc kubenswrapper[4923]: I1128 11:32:42.301190 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ff94fcb0-8bac-4b68-b732-c20bd131c50f-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-pnzvk\" (UID: \"ff94fcb0-8bac-4b68-b732-c20bd131c50f\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-pnzvk"
Nov 28 11:32:42 crc kubenswrapper[4923]: I1128 11:32:42.301275 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ff94fcb0-8bac-4b68-b732-c20bd131c50f-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-pnzvk\" (UID: \"ff94fcb0-8bac-4b68-b732-c20bd131c50f\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-pnzvk"
Nov 28 11:32:42 crc kubenswrapper[4923]: I1128 11:32:42.301535 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff94fcb0-8bac-4b68-b732-c20bd131c50f-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-pnzvk\" (UID: \"ff94fcb0-8bac-4b68-b732-c20bd131c50f\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-pnzvk"
Nov 28 11:32:42 crc kubenswrapper[4923]: I1128 11:32:42.301599 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pbxcn\" (UniqueName: \"kubernetes.io/projected/ff94fcb0-8bac-4b68-b732-c20bd131c50f-kube-api-access-pbxcn\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-pnzvk\" (UID: \"ff94fcb0-8bac-4b68-b732-c20bd131c50f\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-pnzvk"
Nov 28 11:32:42 crc kubenswrapper[4923]: I1128 11:32:42.304758 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff94fcb0-8bac-4b68-b732-c20bd131c50f-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-pnzvk\" (UID: \"ff94fcb0-8bac-4b68-b732-c20bd131c50f\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-pnzvk"
Nov 28 11:32:42 crc kubenswrapper[4923]: I1128 11:32:42.308114 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ff94fcb0-8bac-4b68-b732-c20bd131c50f-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-pnzvk\" (UID: \"ff94fcb0-8bac-4b68-b732-c20bd131c50f\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-pnzvk"
Nov 28 11:32:42 crc kubenswrapper[4923]: I1128 11:32:42.312465 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ff94fcb0-8bac-4b68-b732-c20bd131c50f-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-pnzvk\" (UID: \"ff94fcb0-8bac-4b68-b732-c20bd131c50f\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-pnzvk"
Nov 28 11:32:42 crc kubenswrapper[4923]: I1128 11:32:42.317545 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pbxcn\" (UniqueName: \"kubernetes.io/projected/ff94fcb0-8bac-4b68-b732-c20bd131c50f-kube-api-access-pbxcn\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-pnzvk\" (UID: \"ff94fcb0-8bac-4b68-b732-c20bd131c50f\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-pnzvk"
Nov 28 11:32:42 crc kubenswrapper[4923]: I1128 11:32:42.476578 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-pnzvk"
Nov 28 11:32:43 crc kubenswrapper[4923]: I1128 11:32:43.050133 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-pnzvk"]
Nov 28 11:32:43 crc kubenswrapper[4923]: I1128 11:32:43.098611 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-pnzvk" event={"ID":"ff94fcb0-8bac-4b68-b732-c20bd131c50f","Type":"ContainerStarted","Data":"bfdfda388cf1d3cdd2b1dd7c04547fb1ba775fba3130c0e9aaf7c1b17da9890c"}
Nov 28 11:32:44 crc kubenswrapper[4923]: I1128 11:32:44.110059 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-pnzvk" event={"ID":"ff94fcb0-8bac-4b68-b732-c20bd131c50f","Type":"ContainerStarted","Data":"94cd1fb47438c94d7637e60275cd4d232f32fb465de06175eebc313c2d6e4c42"}
Nov 28 11:32:44 crc kubenswrapper[4923]: I1128 11:32:44.130847 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-pnzvk" podStartSLOduration=1.4598612420000001 podStartE2EDuration="2.130827061s" podCreationTimestamp="2025-11-28 11:32:42 +0000 UTC" firstStartedPulling="2025-11-28 11:32:43.04486047 +0000 UTC m=+1442.173544690" lastFinishedPulling="2025-11-28 11:32:43.715826299 +0000 UTC m=+1442.844510509" observedRunningTime="2025-11-28 11:32:44.128645158 +0000 UTC m=+1443.257329378" watchObservedRunningTime="2025-11-28 11:32:44.130827061 +0000 UTC m=+1443.259511281"
Nov 28 11:33:44 crc kubenswrapper[4923]: I1128 11:33:44.026143 4923 patch_prober.go:28] interesting pod/machine-config-daemon-bwdth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 11:33:44 crc kubenswrapper[4923]: I1128 11:33:44.026731 4923 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 11:34:00 crc kubenswrapper[4923]: I1128 11:34:00.748213 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-jg4b2"]
Nov 28 11:34:00 crc kubenswrapper[4923]: I1128 11:34:00.752170 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jg4b2"
Nov 28 11:34:00 crc kubenswrapper[4923]: I1128 11:34:00.761901 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-jg4b2"]
Nov 28 11:34:00 crc kubenswrapper[4923]: I1128 11:34:00.803145 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hsdc5\" (UniqueName: \"kubernetes.io/projected/6bf7311b-877b-4fda-9cc9-c5b3de843cee-kube-api-access-hsdc5\") pod \"redhat-marketplace-jg4b2\" (UID: \"6bf7311b-877b-4fda-9cc9-c5b3de843cee\") " pod="openshift-marketplace/redhat-marketplace-jg4b2"
Nov 28 11:34:00 crc kubenswrapper[4923]: I1128 11:34:00.803343 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6bf7311b-877b-4fda-9cc9-c5b3de843cee-catalog-content\") pod \"redhat-marketplace-jg4b2\" (UID: \"6bf7311b-877b-4fda-9cc9-c5b3de843cee\") " pod="openshift-marketplace/redhat-marketplace-jg4b2"
Nov 28 11:34:00 crc kubenswrapper[4923]: I1128 11:34:00.803725 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6bf7311b-877b-4fda-9cc9-c5b3de843cee-utilities\") pod \"redhat-marketplace-jg4b2\" (UID: \"6bf7311b-877b-4fda-9cc9-c5b3de843cee\") " pod="openshift-marketplace/redhat-marketplace-jg4b2"
Nov 28 11:34:00 crc kubenswrapper[4923]: I1128 11:34:00.904983 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6bf7311b-877b-4fda-9cc9-c5b3de843cee-utilities\") pod \"redhat-marketplace-jg4b2\" (UID: \"6bf7311b-877b-4fda-9cc9-c5b3de843cee\") " pod="openshift-marketplace/redhat-marketplace-jg4b2"
Nov 28 11:34:00 crc kubenswrapper[4923]: I1128 11:34:00.905070 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hsdc5\" (UniqueName: \"kubernetes.io/projected/6bf7311b-877b-4fda-9cc9-c5b3de843cee-kube-api-access-hsdc5\") pod \"redhat-marketplace-jg4b2\" (UID: \"6bf7311b-877b-4fda-9cc9-c5b3de843cee\") " pod="openshift-marketplace/redhat-marketplace-jg4b2"
Nov 28 11:34:00 crc kubenswrapper[4923]: I1128 11:34:00.905137 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6bf7311b-877b-4fda-9cc9-c5b3de843cee-catalog-content\") pod \"redhat-marketplace-jg4b2\" (UID: \"6bf7311b-877b-4fda-9cc9-c5b3de843cee\") " pod="openshift-marketplace/redhat-marketplace-jg4b2"
Nov 28 11:34:00 crc kubenswrapper[4923]: I1128 11:34:00.905475 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6bf7311b-877b-4fda-9cc9-c5b3de843cee-utilities\") pod \"redhat-marketplace-jg4b2\" (UID: \"6bf7311b-877b-4fda-9cc9-c5b3de843cee\") " pod="openshift-marketplace/redhat-marketplace-jg4b2"
Nov 28 11:34:00 crc kubenswrapper[4923]: I1128 11:34:00.905772 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6bf7311b-877b-4fda-9cc9-c5b3de843cee-catalog-content\") pod \"redhat-marketplace-jg4b2\" (UID: \"6bf7311b-877b-4fda-9cc9-c5b3de843cee\") " pod="openshift-marketplace/redhat-marketplace-jg4b2"
Nov 28 11:34:00 crc kubenswrapper[4923]: I1128 11:34:00.922197 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hsdc5\" (UniqueName: \"kubernetes.io/projected/6bf7311b-877b-4fda-9cc9-c5b3de843cee-kube-api-access-hsdc5\") pod \"redhat-marketplace-jg4b2\" (UID: \"6bf7311b-877b-4fda-9cc9-c5b3de843cee\") " pod="openshift-marketplace/redhat-marketplace-jg4b2"
Nov 28 11:34:01 crc kubenswrapper[4923]: I1128 11:34:01.076641 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jg4b2"
Nov 28 11:34:01 crc kubenswrapper[4923]: I1128 11:34:01.479094 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-jg4b2"]
Nov 28 11:34:02 crc kubenswrapper[4923]: I1128 11:34:02.062743 4923 generic.go:334] "Generic (PLEG): container finished" podID="6bf7311b-877b-4fda-9cc9-c5b3de843cee" containerID="a64abce8ddd40ba1d1852e7fa2b1864624068f14430c8382146b6997578f50cd" exitCode=0
Nov 28 11:34:02 crc kubenswrapper[4923]: I1128 11:34:02.062921 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jg4b2" event={"ID":"6bf7311b-877b-4fda-9cc9-c5b3de843cee","Type":"ContainerDied","Data":"a64abce8ddd40ba1d1852e7fa2b1864624068f14430c8382146b6997578f50cd"}
Nov 28 11:34:02 crc kubenswrapper[4923]: I1128 11:34:02.063044 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jg4b2" event={"ID":"6bf7311b-877b-4fda-9cc9-c5b3de843cee","Type":"ContainerStarted","Data":"306268aadec8b85da6524252322a66fc47348fad746edaac3f07b1e9ebed4b7f"}
Nov 28 11:34:05 crc kubenswrapper[4923]: I1128 11:34:05.096324 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jg4b2" event={"ID":"6bf7311b-877b-4fda-9cc9-c5b3de843cee","Type":"ContainerStarted","Data":"0c82bf44fd84896c2a3115bd1e2c311ad326fa706fe2254fdb0fd7526686c3f7"}
Nov 28 11:34:05 crc kubenswrapper[4923]: I1128 11:34:05.463910 4923 scope.go:117] "RemoveContainer" containerID="e2322e4c37f86cdad09834dca1076b76a685f0ee16d49528d28561ad073215ec"
Nov 28 11:34:05 crc kubenswrapper[4923]: I1128 11:34:05.497026 4923 scope.go:117] "RemoveContainer" containerID="a1f0106d44dc872d919ac5ba42c4dcec113aec34c0d39ab29a15fdb9693351f6"
Nov 28 11:34:06 crc kubenswrapper[4923]: I1128 11:34:06.113036 4923 generic.go:334] "Generic (PLEG): container finished" podID="6bf7311b-877b-4fda-9cc9-c5b3de843cee" containerID="0c82bf44fd84896c2a3115bd1e2c311ad326fa706fe2254fdb0fd7526686c3f7" exitCode=0
Nov 28 11:34:06 crc kubenswrapper[4923]: I1128 11:34:06.113100 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jg4b2" event={"ID":"6bf7311b-877b-4fda-9cc9-c5b3de843cee","Type":"ContainerDied","Data":"0c82bf44fd84896c2a3115bd1e2c311ad326fa706fe2254fdb0fd7526686c3f7"}
Nov 28 11:34:07 crc kubenswrapper[4923]: I1128 11:34:07.127958 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jg4b2" event={"ID":"6bf7311b-877b-4fda-9cc9-c5b3de843cee","Type":"ContainerStarted","Data":"c7714844c867352163faa462063eb12b80806d76d9963f8600660c8faea6bf0b"}
Nov 28 11:34:07 crc kubenswrapper[4923]: I1128 11:34:07.154823 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-jg4b2" podStartSLOduration=2.708288098 podStartE2EDuration="7.154799892s" podCreationTimestamp="2025-11-28 11:34:00 +0000 UTC" firstStartedPulling="2025-11-28 11:34:02.067099565 +0000 UTC m=+1521.195783775" lastFinishedPulling="2025-11-28 11:34:06.513611319 +0000 UTC m=+1525.642295569" observedRunningTime="2025-11-28 11:34:07.150321455 +0000 UTC m=+1526.279005665" watchObservedRunningTime="2025-11-28 11:34:07.154799892 +0000 UTC m=+1526.283484102"
Nov 28 11:34:11 crc kubenswrapper[4923]: I1128 11:34:11.077294 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-jg4b2"
Nov 28 11:34:11 crc kubenswrapper[4923]: I1128 11:34:11.078148 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-jg4b2"
Nov 28 11:34:11 crc kubenswrapper[4923]: I1128 11:34:11.127534 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-jg4b2"
Nov 28 11:34:11 crc kubenswrapper[4923]: I1128 11:34:11.229287 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-jg4b2"
Nov 28 11:34:11 crc kubenswrapper[4923]: I1128 11:34:11.370357 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-jg4b2"]
Nov 28 11:34:13 crc kubenswrapper[4923]: I1128 11:34:13.187747 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-jg4b2" podUID="6bf7311b-877b-4fda-9cc9-c5b3de843cee" containerName="registry-server" containerID="cri-o://c7714844c867352163faa462063eb12b80806d76d9963f8600660c8faea6bf0b" gracePeriod=2
Nov 28 11:34:13 crc kubenswrapper[4923]: I1128 11:34:13.599043 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jg4b2"
Nov 28 11:34:13 crc kubenswrapper[4923]: I1128 11:34:13.690118 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hsdc5\" (UniqueName: \"kubernetes.io/projected/6bf7311b-877b-4fda-9cc9-c5b3de843cee-kube-api-access-hsdc5\") pod \"6bf7311b-877b-4fda-9cc9-c5b3de843cee\" (UID: \"6bf7311b-877b-4fda-9cc9-c5b3de843cee\") "
Nov 28 11:34:13 crc kubenswrapper[4923]: I1128 11:34:13.690355 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6bf7311b-877b-4fda-9cc9-c5b3de843cee-catalog-content\") pod \"6bf7311b-877b-4fda-9cc9-c5b3de843cee\" (UID: \"6bf7311b-877b-4fda-9cc9-c5b3de843cee\") "
Nov 28 11:34:13 crc kubenswrapper[4923]: I1128 11:34:13.690407 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6bf7311b-877b-4fda-9cc9-c5b3de843cee-utilities\") pod \"6bf7311b-877b-4fda-9cc9-c5b3de843cee\" (UID: \"6bf7311b-877b-4fda-9cc9-c5b3de843cee\") "
Nov 28 11:34:13 crc kubenswrapper[4923]: I1128 11:34:13.691549 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6bf7311b-877b-4fda-9cc9-c5b3de843cee-utilities" (OuterVolumeSpecName: "utilities") pod "6bf7311b-877b-4fda-9cc9-c5b3de843cee" (UID: "6bf7311b-877b-4fda-9cc9-c5b3de843cee"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 11:34:13 crc kubenswrapper[4923]: I1128 11:34:13.699413 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6bf7311b-877b-4fda-9cc9-c5b3de843cee-kube-api-access-hsdc5" (OuterVolumeSpecName: "kube-api-access-hsdc5") pod "6bf7311b-877b-4fda-9cc9-c5b3de843cee" (UID: "6bf7311b-877b-4fda-9cc9-c5b3de843cee"). InnerVolumeSpecName "kube-api-access-hsdc5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 11:34:13 crc kubenswrapper[4923]: I1128 11:34:13.716753 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6bf7311b-877b-4fda-9cc9-c5b3de843cee-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6bf7311b-877b-4fda-9cc9-c5b3de843cee" (UID: "6bf7311b-877b-4fda-9cc9-c5b3de843cee"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 11:34:13 crc kubenswrapper[4923]: I1128 11:34:13.793381 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hsdc5\" (UniqueName: \"kubernetes.io/projected/6bf7311b-877b-4fda-9cc9-c5b3de843cee-kube-api-access-hsdc5\") on node \"crc\" DevicePath \"\""
Nov 28 11:34:13 crc kubenswrapper[4923]: I1128 11:34:13.793762 4923 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6bf7311b-877b-4fda-9cc9-c5b3de843cee-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 28 11:34:13 crc kubenswrapper[4923]: I1128 11:34:13.793790 4923 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6bf7311b-877b-4fda-9cc9-c5b3de843cee-utilities\") on node \"crc\" DevicePath \"\""
Nov 28 11:34:14 crc kubenswrapper[4923]: I1128 11:34:14.025925 4923 patch_prober.go:28] interesting pod/machine-config-daemon-bwdth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 11:34:14 crc kubenswrapper[4923]: I1128 11:34:14.026030 4923 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 11:34:14 crc kubenswrapper[4923]: I1128 11:34:14.199611 4923 generic.go:334] "Generic (PLEG): container finished" podID="6bf7311b-877b-4fda-9cc9-c5b3de843cee" containerID="c7714844c867352163faa462063eb12b80806d76d9963f8600660c8faea6bf0b" exitCode=0
Nov 28 11:34:14 crc kubenswrapper[4923]: I1128 11:34:14.199676 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jg4b2" event={"ID":"6bf7311b-877b-4fda-9cc9-c5b3de843cee","Type":"ContainerDied","Data":"c7714844c867352163faa462063eb12b80806d76d9963f8600660c8faea6bf0b"}
Nov 28 11:34:14 crc kubenswrapper[4923]: I1128 11:34:14.199719 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jg4b2" event={"ID":"6bf7311b-877b-4fda-9cc9-c5b3de843cee","Type":"ContainerDied","Data":"306268aadec8b85da6524252322a66fc47348fad746edaac3f07b1e9ebed4b7f"}
Nov 28 11:34:14 crc kubenswrapper[4923]: I1128 11:34:14.199714 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jg4b2"
Nov 28 11:34:14 crc kubenswrapper[4923]: I1128 11:34:14.199741 4923 scope.go:117] "RemoveContainer" containerID="c7714844c867352163faa462063eb12b80806d76d9963f8600660c8faea6bf0b"
Nov 28 11:34:14 crc kubenswrapper[4923]: I1128 11:34:14.231393 4923 scope.go:117] "RemoveContainer" containerID="0c82bf44fd84896c2a3115bd1e2c311ad326fa706fe2254fdb0fd7526686c3f7"
Nov 28 11:34:14 crc kubenswrapper[4923]: I1128 11:34:14.253434 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-jg4b2"]
Nov 28 11:34:14 crc kubenswrapper[4923]: I1128 11:34:14.263519 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-jg4b2"]
Nov 28 11:34:14 crc kubenswrapper[4923]: I1128 11:34:14.271672 4923 scope.go:117] "RemoveContainer" containerID="a64abce8ddd40ba1d1852e7fa2b1864624068f14430c8382146b6997578f50cd"
Nov 28 11:34:14 crc kubenswrapper[4923]: I1128 11:34:14.317754 4923 scope.go:117] "RemoveContainer" containerID="c7714844c867352163faa462063eb12b80806d76d9963f8600660c8faea6bf0b"
Nov 28 11:34:14 crc kubenswrapper[4923]: E1128 11:34:14.319283 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c7714844c867352163faa462063eb12b80806d76d9963f8600660c8faea6bf0b\": container with ID starting with c7714844c867352163faa462063eb12b80806d76d9963f8600660c8faea6bf0b not found: ID does not exist" containerID="c7714844c867352163faa462063eb12b80806d76d9963f8600660c8faea6bf0b"
Nov 28 11:34:14 crc kubenswrapper[4923]: I1128 11:34:14.319334 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c7714844c867352163faa462063eb12b80806d76d9963f8600660c8faea6bf0b"} err="failed to get container status \"c7714844c867352163faa462063eb12b80806d76d9963f8600660c8faea6bf0b\": rpc error: code = NotFound desc = could not find container \"c7714844c867352163faa462063eb12b80806d76d9963f8600660c8faea6bf0b\": container with ID starting with c7714844c867352163faa462063eb12b80806d76d9963f8600660c8faea6bf0b not found: ID does not exist"
Nov 28 11:34:14 crc kubenswrapper[4923]: I1128 11:34:14.319360 4923 scope.go:117] "RemoveContainer" containerID="0c82bf44fd84896c2a3115bd1e2c311ad326fa706fe2254fdb0fd7526686c3f7"
Nov 28 11:34:14 crc kubenswrapper[4923]: E1128 11:34:14.319841 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0c82bf44fd84896c2a3115bd1e2c311ad326fa706fe2254fdb0fd7526686c3f7\": container with ID starting with 0c82bf44fd84896c2a3115bd1e2c311ad326fa706fe2254fdb0fd7526686c3f7 not found: ID does not exist" containerID="0c82bf44fd84896c2a3115bd1e2c311ad326fa706fe2254fdb0fd7526686c3f7"
Nov 28 11:34:14 crc kubenswrapper[4923]: I1128 11:34:14.319903 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0c82bf44fd84896c2a3115bd1e2c311ad326fa706fe2254fdb0fd7526686c3f7"} err="failed to get container status \"0c82bf44fd84896c2a3115bd1e2c311ad326fa706fe2254fdb0fd7526686c3f7\": rpc error: code = NotFound desc = could not find container \"0c82bf44fd84896c2a3115bd1e2c311ad326fa706fe2254fdb0fd7526686c3f7\": container with ID starting with 0c82bf44fd84896c2a3115bd1e2c311ad326fa706fe2254fdb0fd7526686c3f7 not found: ID does not exist"
Nov 28 11:34:14 crc kubenswrapper[4923]: I1128 11:34:14.319971 4923 scope.go:117] "RemoveContainer" containerID="a64abce8ddd40ba1d1852e7fa2b1864624068f14430c8382146b6997578f50cd"
Nov 28 11:34:14 crc kubenswrapper[4923]: E1128 11:34:14.320990 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a64abce8ddd40ba1d1852e7fa2b1864624068f14430c8382146b6997578f50cd\": container with ID starting with a64abce8ddd40ba1d1852e7fa2b1864624068f14430c8382146b6997578f50cd not found: ID does not exist" containerID="a64abce8ddd40ba1d1852e7fa2b1864624068f14430c8382146b6997578f50cd"
Nov 28 11:34:14 crc kubenswrapper[4923]: I1128 11:34:14.321037 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a64abce8ddd40ba1d1852e7fa2b1864624068f14430c8382146b6997578f50cd"} err="failed to get container status \"a64abce8ddd40ba1d1852e7fa2b1864624068f14430c8382146b6997578f50cd\": rpc error: code = NotFound desc = could not find container \"a64abce8ddd40ba1d1852e7fa2b1864624068f14430c8382146b6997578f50cd\": container with ID starting with a64abce8ddd40ba1d1852e7fa2b1864624068f14430c8382146b6997578f50cd not found: ID does not exist"
Nov 28 11:34:15 crc kubenswrapper[4923]: I1128 11:34:15.469911 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6bf7311b-877b-4fda-9cc9-c5b3de843cee" path="/var/lib/kubelet/pods/6bf7311b-877b-4fda-9cc9-c5b3de843cee/volumes"
Nov 28 11:34:44 crc kubenswrapper[4923]: I1128 11:34:44.026343 4923 patch_prober.go:28] interesting pod/machine-config-daemon-bwdth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 11:34:44 crc kubenswrapper[4923]: I1128 11:34:44.027087 4923 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 11:34:44 crc kubenswrapper[4923]: I1128 11:34:44.027187 4923 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-bwdth"
Nov 28 11:34:44 crc kubenswrapper[4923]: I1128 11:34:44.028134 4923 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"59e9391c4a472ec90ba5872638acc6cc579bc7ad3d795096b3c915356fd4186a"} pod="openshift-machine-config-operator/machine-config-daemon-bwdth" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 28 11:34:44 crc kubenswrapper[4923]: I1128 11:34:44.028242 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" containerName="machine-config-daemon" containerID="cri-o://59e9391c4a472ec90ba5872638acc6cc579bc7ad3d795096b3c915356fd4186a" gracePeriod=600
Nov 28 11:34:44 crc kubenswrapper[4923]: E1128 11:34:44.167235 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bwdth_openshift-machine-config-operator(092566f7-fc7d-4897-a1f2-4ecedcd3058e)\"" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e"
Nov 28 11:34:44 crc kubenswrapper[4923]: I1128 11:34:44.804190 4923 generic.go:334] "Generic (PLEG): container finished" podID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" containerID="59e9391c4a472ec90ba5872638acc6cc579bc7ad3d795096b3c915356fd4186a" exitCode=0
Nov 28 11:34:44 crc kubenswrapper[4923]: I1128 11:34:44.804230 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" event={"ID":"092566f7-fc7d-4897-a1f2-4ecedcd3058e","Type":"ContainerDied","Data":"59e9391c4a472ec90ba5872638acc6cc579bc7ad3d795096b3c915356fd4186a"}
Nov 28 11:34:44 crc kubenswrapper[4923]: I1128 11:34:44.804259 4923 scope.go:117] "RemoveContainer" containerID="d1a2e1beb233079a250c29730400b1c9cdbf26210af36136b746e09631ce81a5"
Nov 28 11:34:44 crc kubenswrapper[4923]: I1128 11:34:44.804820 4923 scope.go:117] "RemoveContainer" containerID="59e9391c4a472ec90ba5872638acc6cc579bc7ad3d795096b3c915356fd4186a"
Nov 28 11:34:44 crc kubenswrapper[4923]: E1128 11:34:44.805089 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bwdth_openshift-machine-config-operator(092566f7-fc7d-4897-a1f2-4ecedcd3058e)\"" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e"
Nov 28 11:34:56 crc kubenswrapper[4923]: I1128 11:34:56.169248 4923 scope.go:117] "RemoveContainer" containerID="59e9391c4a472ec90ba5872638acc6cc579bc7ad3d795096b3c915356fd4186a"
Nov 28 11:34:56 crc kubenswrapper[4923]: E1128 11:34:56.169981 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bwdth_openshift-machine-config-operator(092566f7-fc7d-4897-a1f2-4ecedcd3058e)\"" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e"
Nov 28 11:35:05 crc kubenswrapper[4923]: I1128 11:35:05.636882 4923 scope.go:117] "RemoveContainer" containerID="5a0cebbe8280150d56107e83737cca6ba9f9fd5ba02b0fb12f54bc8903de7217"
Nov 28 11:35:05 crc kubenswrapper[4923]: I1128 11:35:05.675375 4923 scope.go:117] "RemoveContainer" containerID="41a1b39660767d19c4d056837548439d65006a5a6750c72bb01e969231d2b3fe"
Nov 28 11:35:08 crc kubenswrapper[4923]: I1128 11:35:08.169111 4923 scope.go:117] "RemoveContainer" containerID="59e9391c4a472ec90ba5872638acc6cc579bc7ad3d795096b3c915356fd4186a"
Nov 28 11:35:08 crc kubenswrapper[4923]: E1128 11:35:08.169584 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bwdth_openshift-machine-config-operator(092566f7-fc7d-4897-a1f2-4ecedcd3058e)\"" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e"
Nov 28 11:35:20 crc kubenswrapper[4923]: I1128 11:35:20.169956 4923 scope.go:117] "RemoveContainer" containerID="59e9391c4a472ec90ba5872638acc6cc579bc7ad3d795096b3c915356fd4186a"
Nov 28 11:35:20 crc kubenswrapper[4923]: E1128 11:35:20.171158 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bwdth_openshift-machine-config-operator(092566f7-fc7d-4897-a1f2-4ecedcd3058e)\"" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e"
Nov 28 11:35:33 crc kubenswrapper[4923]: I1128 11:35:33.169638 4923 scope.go:117] "RemoveContainer" containerID="59e9391c4a472ec90ba5872638acc6cc579bc7ad3d795096b3c915356fd4186a"
Nov 28 11:35:33 crc kubenswrapper[4923]: E1128 11:35:33.170583 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bwdth_openshift-machine-config-operator(092566f7-fc7d-4897-a1f2-4ecedcd3058e)\"" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e"
Nov 28 11:35:46 crc kubenswrapper[4923]: I1128 11:35:46.168552 4923 scope.go:117] "RemoveContainer" containerID="59e9391c4a472ec90ba5872638acc6cc579bc7ad3d795096b3c915356fd4186a"
Nov 28 11:35:46 crc kubenswrapper[4923]: E1128 11:35:46.169492 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bwdth_openshift-machine-config-operator(092566f7-fc7d-4897-a1f2-4ecedcd3058e)\"" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e"
Nov 28 11:35:57 crc kubenswrapper[4923]: I1128 11:35:57.450612 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-69vmq"]
Nov 28 11:35:57 crc kubenswrapper[4923]: E1128 11:35:57.452259 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6bf7311b-877b-4fda-9cc9-c5b3de843cee" containerName="extract-utilities"
Nov 28 11:35:57 crc kubenswrapper[4923]: I1128 11:35:57.452280 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="6bf7311b-877b-4fda-9cc9-c5b3de843cee" containerName="extract-utilities"
Nov 28 11:35:57 crc kubenswrapper[4923]: E1128 11:35:57.452303 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6bf7311b-877b-4fda-9cc9-c5b3de843cee" containerName="registry-server"
Nov 28 11:35:57 crc kubenswrapper[4923]: I1128 11:35:57.452312 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="6bf7311b-877b-4fda-9cc9-c5b3de843cee" containerName="registry-server"
Nov 28 11:35:57 crc kubenswrapper[4923]: E1128 11:35:57.452336 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6bf7311b-877b-4fda-9cc9-c5b3de843cee" containerName="extract-content"
Nov 28 11:35:57 crc kubenswrapper[4923]: I1128 11:35:57.452344 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="6bf7311b-877b-4fda-9cc9-c5b3de843cee" containerName="extract-content"
Nov 28 11:35:57 crc kubenswrapper[4923]: I1128 11:35:57.452871 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="6bf7311b-877b-4fda-9cc9-c5b3de843cee" containerName="registry-server"
Nov 28 11:35:57 crc kubenswrapper[4923]: I1128 11:35:57.471077 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-69vmq"
Nov 28 11:35:57 crc kubenswrapper[4923]: I1128 11:35:57.475301 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-69vmq"]
Nov 28 11:35:57 crc kubenswrapper[4923]: I1128 11:35:57.581124 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0ee8723b-65f2-4105-b01c-cba9b345d69a-utilities\") pod \"certified-operators-69vmq\" (UID: \"0ee8723b-65f2-4105-b01c-cba9b345d69a\") " pod="openshift-marketplace/certified-operators-69vmq"
Nov 28 11:35:57 crc kubenswrapper[4923]: I1128 11:35:57.581256 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wmjkz\" (UniqueName: \"kubernetes.io/projected/0ee8723b-65f2-4105-b01c-cba9b345d69a-kube-api-access-wmjkz\") pod \"certified-operators-69vmq\" (UID: \"0ee8723b-65f2-4105-b01c-cba9b345d69a\") " pod="openshift-marketplace/certified-operators-69vmq"
Nov 28 11:35:57 crc kubenswrapper[4923]: I1128 11:35:57.581287 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0ee8723b-65f2-4105-b01c-cba9b345d69a-catalog-content\") pod \"certified-operators-69vmq\" (UID: \"0ee8723b-65f2-4105-b01c-cba9b345d69a\") " pod="openshift-marketplace/certified-operators-69vmq"
Nov 28 11:35:57 crc kubenswrapper[4923]: I1128 11:35:57.682373 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wmjkz\" (UniqueName: \"kubernetes.io/projected/0ee8723b-65f2-4105-b01c-cba9b345d69a-kube-api-access-wmjkz\") pod \"certified-operators-69vmq\" (UID: \"0ee8723b-65f2-4105-b01c-cba9b345d69a\") " pod="openshift-marketplace/certified-operators-69vmq"
Nov 28 11:35:57 crc kubenswrapper[4923]: I1128 11:35:57.682431 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0ee8723b-65f2-4105-b01c-cba9b345d69a-catalog-content\") pod \"certified-operators-69vmq\" (UID: \"0ee8723b-65f2-4105-b01c-cba9b345d69a\") " pod="openshift-marketplace/certified-operators-69vmq"
Nov 28 11:35:57 crc kubenswrapper[4923]: I1128 11:35:57.682483 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0ee8723b-65f2-4105-b01c-cba9b345d69a-utilities\") pod \"certified-operators-69vmq\" (UID: \"0ee8723b-65f2-4105-b01c-cba9b345d69a\") " pod="openshift-marketplace/certified-operators-69vmq"
Nov 28 11:35:57 crc kubenswrapper[4923]: I1128 11:35:57.683034 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0ee8723b-65f2-4105-b01c-cba9b345d69a-utilities\") pod \"certified-operators-69vmq\" (UID: \"0ee8723b-65f2-4105-b01c-cba9b345d69a\") " pod="openshift-marketplace/certified-operators-69vmq"
Nov 28 11:35:57 crc kubenswrapper[4923]: I1128 11:35:57.683505 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0ee8723b-65f2-4105-b01c-cba9b345d69a-catalog-content\") pod \"certified-operators-69vmq\" (UID: \"0ee8723b-65f2-4105-b01c-cba9b345d69a\") " pod="openshift-marketplace/certified-operators-69vmq"
Nov 28 11:35:57 crc kubenswrapper[4923]: I1128 11:35:57.701643 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wmjkz\" (UniqueName: \"kubernetes.io/projected/0ee8723b-65f2-4105-b01c-cba9b345d69a-kube-api-access-wmjkz\") pod \"certified-operators-69vmq\" (UID: \"0ee8723b-65f2-4105-b01c-cba9b345d69a\") " pod="openshift-marketplace/certified-operators-69vmq"
Nov 28 11:35:57 crc kubenswrapper[4923]: I1128 11:35:57.800472 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-69vmq"
Nov 28 11:35:58 crc kubenswrapper[4923]: I1128 11:35:58.250854 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-69vmq"]
Nov 28 11:35:58 crc kubenswrapper[4923]: I1128 11:35:58.558948 4923 generic.go:334] "Generic (PLEG): container finished" podID="0ee8723b-65f2-4105-b01c-cba9b345d69a" containerID="b46e835e6996edce1bd847010347c723cc848e59c761d7274da70d30d61834b5" exitCode=0
Nov 28 11:35:58 crc kubenswrapper[4923]: I1128 11:35:58.559001 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-69vmq" event={"ID":"0ee8723b-65f2-4105-b01c-cba9b345d69a","Type":"ContainerDied","Data":"b46e835e6996edce1bd847010347c723cc848e59c761d7274da70d30d61834b5"}
Nov 28 11:35:58 crc kubenswrapper[4923]: I1128 11:35:58.559031 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-69vmq" event={"ID":"0ee8723b-65f2-4105-b01c-cba9b345d69a","Type":"ContainerStarted","Data":"e543cf496a51ef944bee1ffe325a26e641df3f9632a72672be4486f6e0ad0274"}
Nov 28 11:35:58 crc kubenswrapper[4923]: I1128 11:35:58.560506 4923 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 28 11:35:59 crc kubenswrapper[4923]: I1128 11:35:59.169096 4923 scope.go:117] "RemoveContainer" containerID="59e9391c4a472ec90ba5872638acc6cc579bc7ad3d795096b3c915356fd4186a"
Nov 28 11:35:59 crc kubenswrapper[4923]: E1128 11:35:59.169438 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bwdth_openshift-machine-config-operator(092566f7-fc7d-4897-a1f2-4ecedcd3058e)\"" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e"
Nov 28 11:35:59 crc kubenswrapper[4923]: I1128 11:35:59.574104 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-69vmq" event={"ID":"0ee8723b-65f2-4105-b01c-cba9b345d69a","Type":"ContainerStarted","Data":"f8c30d6a96dc3c345b52d1757905d35686486e5ec1b0d38e2b077ac15d9e7e78"}
Nov 28 11:36:03 crc kubenswrapper[4923]: I1128 11:36:03.614828 4923 generic.go:334] "Generic (PLEG): container finished" podID="0ee8723b-65f2-4105-b01c-cba9b345d69a" containerID="f8c30d6a96dc3c345b52d1757905d35686486e5ec1b0d38e2b077ac15d9e7e78" exitCode=0
Nov 28 11:36:03 crc kubenswrapper[4923]: I1128 11:36:03.614901 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-69vmq" event={"ID":"0ee8723b-65f2-4105-b01c-cba9b345d69a","Type":"ContainerDied","Data":"f8c30d6a96dc3c345b52d1757905d35686486e5ec1b0d38e2b077ac15d9e7e78"}
Nov 28 11:36:05 crc kubenswrapper[4923]: I1128 11:36:05.646878 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-69vmq" event={"ID":"0ee8723b-65f2-4105-b01c-cba9b345d69a","Type":"ContainerStarted","Data":"ae52242024435dfb055ac4a9ad077c198b47190311803b65506d268638b5a843"}
Nov 28 11:36:05 crc kubenswrapper[4923]: I1128 11:36:05.676929 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-69vmq" podStartSLOduration=2.786620984 podStartE2EDuration="8.676905367s" podCreationTimestamp="2025-11-28 11:35:57 +0000 UTC" firstStartedPulling="2025-11-28 11:35:58.56032041 +0000 UTC m=+1637.689004620" lastFinishedPulling="2025-11-28 11:36:04.450604793 +0000 UTC m=+1643.579289003" observedRunningTime="2025-11-28 11:36:05.672471841 +0000 UTC m=+1644.801156091" watchObservedRunningTime="2025-11-28 11:36:05.676905367 +0000 UTC m=+1644.805589617"
Nov 28 11:36:07 crc kubenswrapper[4923]: I1128 11:36:07.802088 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-69vmq"
Nov 28 11:36:07 crc kubenswrapper[4923]: I1128 11:36:07.802483 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-69vmq"
Nov 28 11:36:07 crc kubenswrapper[4923]: I1128 11:36:07.880539 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-69vmq"
Nov 28 11:36:11 crc kubenswrapper[4923]: I1128 11:36:11.174213 4923 scope.go:117] "RemoveContainer" containerID="59e9391c4a472ec90ba5872638acc6cc579bc7ad3d795096b3c915356fd4186a"
Nov 28 11:36:11 crc kubenswrapper[4923]: E1128 11:36:11.174666 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bwdth_openshift-machine-config-operator(092566f7-fc7d-4897-a1f2-4ecedcd3058e)\"" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e"
Nov 28 11:36:17 crc kubenswrapper[4923]: I1128 11:36:17.865477 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-69vmq"
Nov 28 11:36:17 crc kubenswrapper[4923]: I1128 11:36:17.929606 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-69vmq"]
Nov 28 11:36:18 crc kubenswrapper[4923]: I1128 11:36:18.140339 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-69vmq" podUID="0ee8723b-65f2-4105-b01c-cba9b345d69a" containerName="registry-server" containerID="cri-o://ae52242024435dfb055ac4a9ad077c198b47190311803b65506d268638b5a843" gracePeriod=2
Nov 28 11:36:18 crc kubenswrapper[4923]: I1128 11:36:18.625708 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-69vmq"
Nov 28 11:36:18 crc kubenswrapper[4923]: I1128 11:36:18.633780 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0ee8723b-65f2-4105-b01c-cba9b345d69a-utilities\") pod \"0ee8723b-65f2-4105-b01c-cba9b345d69a\" (UID: \"0ee8723b-65f2-4105-b01c-cba9b345d69a\") "
Nov 28 11:36:18 crc kubenswrapper[4923]: I1128 11:36:18.633876 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0ee8723b-65f2-4105-b01c-cba9b345d69a-catalog-content\") pod \"0ee8723b-65f2-4105-b01c-cba9b345d69a\" (UID: \"0ee8723b-65f2-4105-b01c-cba9b345d69a\") "
Nov 28 11:36:18 crc kubenswrapper[4923]: I1128 11:36:18.634037 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wmjkz\" (UniqueName: \"kubernetes.io/projected/0ee8723b-65f2-4105-b01c-cba9b345d69a-kube-api-access-wmjkz\") pod \"0ee8723b-65f2-4105-b01c-cba9b345d69a\" (UID: \"0ee8723b-65f2-4105-b01c-cba9b345d69a\") "
Nov 28 11:36:18 crc kubenswrapper[4923]: I1128 11:36:18.636906 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0ee8723b-65f2-4105-b01c-cba9b345d69a-utilities" (OuterVolumeSpecName: "utilities") pod "0ee8723b-65f2-4105-b01c-cba9b345d69a" (UID: "0ee8723b-65f2-4105-b01c-cba9b345d69a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 11:36:18 crc kubenswrapper[4923]: I1128 11:36:18.648218 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0ee8723b-65f2-4105-b01c-cba9b345d69a-kube-api-access-wmjkz" (OuterVolumeSpecName: "kube-api-access-wmjkz") pod "0ee8723b-65f2-4105-b01c-cba9b345d69a" (UID: "0ee8723b-65f2-4105-b01c-cba9b345d69a"). InnerVolumeSpecName "kube-api-access-wmjkz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 11:36:18 crc kubenswrapper[4923]: I1128 11:36:18.692376 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0ee8723b-65f2-4105-b01c-cba9b345d69a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0ee8723b-65f2-4105-b01c-cba9b345d69a" (UID: "0ee8723b-65f2-4105-b01c-cba9b345d69a"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 11:36:18 crc kubenswrapper[4923]: I1128 11:36:18.736503 4923 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0ee8723b-65f2-4105-b01c-cba9b345d69a-utilities\") on node \"crc\" DevicePath \"\""
Nov 28 11:36:18 crc kubenswrapper[4923]: I1128 11:36:18.736543 4923 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0ee8723b-65f2-4105-b01c-cba9b345d69a-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 28 11:36:18 crc kubenswrapper[4923]: I1128 11:36:18.736559 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wmjkz\" (UniqueName: \"kubernetes.io/projected/0ee8723b-65f2-4105-b01c-cba9b345d69a-kube-api-access-wmjkz\") on node \"crc\" DevicePath \"\""
Nov 28 11:36:19 crc kubenswrapper[4923]: I1128 11:36:19.154491 4923 generic.go:334] "Generic (PLEG): container finished" podID="0ee8723b-65f2-4105-b01c-cba9b345d69a" containerID="ae52242024435dfb055ac4a9ad077c198b47190311803b65506d268638b5a843" exitCode=0
Nov 28 11:36:19 crc kubenswrapper[4923]: I1128 11:36:19.154749 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-69vmq"
Nov 28 11:36:19 crc kubenswrapper[4923]: I1128 11:36:19.154785 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-69vmq" event={"ID":"0ee8723b-65f2-4105-b01c-cba9b345d69a","Type":"ContainerDied","Data":"ae52242024435dfb055ac4a9ad077c198b47190311803b65506d268638b5a843"}
Nov 28 11:36:19 crc kubenswrapper[4923]: I1128 11:36:19.157791 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-69vmq" event={"ID":"0ee8723b-65f2-4105-b01c-cba9b345d69a","Type":"ContainerDied","Data":"e543cf496a51ef944bee1ffe325a26e641df3f9632a72672be4486f6e0ad0274"}
Nov 28 11:36:19 crc kubenswrapper[4923]: I1128 11:36:19.157824 4923 scope.go:117] "RemoveContainer" containerID="ae52242024435dfb055ac4a9ad077c198b47190311803b65506d268638b5a843"
Nov 28 11:36:19 crc kubenswrapper[4923]: I1128 11:36:19.209887 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-69vmq"]
Nov 28 11:36:19 crc kubenswrapper[4923]: I1128 11:36:19.226612 4923 scope.go:117] "RemoveContainer" containerID="f8c30d6a96dc3c345b52d1757905d35686486e5ec1b0d38e2b077ac15d9e7e78"
Nov 28 11:36:19 crc kubenswrapper[4923]: I1128 11:36:19.226675 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-69vmq"]
Nov 28 11:36:19 crc kubenswrapper[4923]: I1128 11:36:19.250011 4923 scope.go:117] "RemoveContainer" containerID="b46e835e6996edce1bd847010347c723cc848e59c761d7274da70d30d61834b5"
Nov 28 11:36:19 crc kubenswrapper[4923]: I1128 11:36:19.313051 4923 scope.go:117] "RemoveContainer" containerID="ae52242024435dfb055ac4a9ad077c198b47190311803b65506d268638b5a843"
Nov 28 11:36:19 crc kubenswrapper[4923]: E1128 11:36:19.313575 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ae52242024435dfb055ac4a9ad077c198b47190311803b65506d268638b5a843\": container with ID starting with ae52242024435dfb055ac4a9ad077c198b47190311803b65506d268638b5a843 not found: ID does not exist" containerID="ae52242024435dfb055ac4a9ad077c198b47190311803b65506d268638b5a843"
Nov 28 11:36:19 crc kubenswrapper[4923]: I1128 11:36:19.313614 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ae52242024435dfb055ac4a9ad077c198b47190311803b65506d268638b5a843"} err="failed to get container status \"ae52242024435dfb055ac4a9ad077c198b47190311803b65506d268638b5a843\": rpc error: code = NotFound desc = could not find container \"ae52242024435dfb055ac4a9ad077c198b47190311803b65506d268638b5a843\": container with ID starting with ae52242024435dfb055ac4a9ad077c198b47190311803b65506d268638b5a843 not found: ID does not exist"
Nov 28 11:36:19 crc kubenswrapper[4923]: I1128 11:36:19.313641 4923 scope.go:117] "RemoveContainer" containerID="f8c30d6a96dc3c345b52d1757905d35686486e5ec1b0d38e2b077ac15d9e7e78"
Nov 28 11:36:19 crc kubenswrapper[4923]: E1128 11:36:19.314595 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f8c30d6a96dc3c345b52d1757905d35686486e5ec1b0d38e2b077ac15d9e7e78\": container with ID starting with f8c30d6a96dc3c345b52d1757905d35686486e5ec1b0d38e2b077ac15d9e7e78 not found: ID does not exist" containerID="f8c30d6a96dc3c345b52d1757905d35686486e5ec1b0d38e2b077ac15d9e7e78"
Nov 28 11:36:19 crc kubenswrapper[4923]: I1128 11:36:19.314646 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f8c30d6a96dc3c345b52d1757905d35686486e5ec1b0d38e2b077ac15d9e7e78"} err="failed to get container status \"f8c30d6a96dc3c345b52d1757905d35686486e5ec1b0d38e2b077ac15d9e7e78\": rpc error: code = NotFound desc = could not find container \"f8c30d6a96dc3c345b52d1757905d35686486e5ec1b0d38e2b077ac15d9e7e78\": container with ID starting with f8c30d6a96dc3c345b52d1757905d35686486e5ec1b0d38e2b077ac15d9e7e78 not found: ID does not exist"
Nov 28 11:36:19 crc kubenswrapper[4923]: I1128 11:36:19.314673 4923 scope.go:117] "RemoveContainer" containerID="b46e835e6996edce1bd847010347c723cc848e59c761d7274da70d30d61834b5"
Nov 28 11:36:19 crc kubenswrapper[4923]: E1128 11:36:19.315050 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b46e835e6996edce1bd847010347c723cc848e59c761d7274da70d30d61834b5\": container with ID starting with b46e835e6996edce1bd847010347c723cc848e59c761d7274da70d30d61834b5 not found: ID does not exist" containerID="b46e835e6996edce1bd847010347c723cc848e59c761d7274da70d30d61834b5"
Nov 28 11:36:19 crc kubenswrapper[4923]: I1128 11:36:19.315092 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b46e835e6996edce1bd847010347c723cc848e59c761d7274da70d30d61834b5"} err="failed to get container status \"b46e835e6996edce1bd847010347c723cc848e59c761d7274da70d30d61834b5\": rpc error: code = NotFound desc = could not find container \"b46e835e6996edce1bd847010347c723cc848e59c761d7274da70d30d61834b5\": container with ID starting with b46e835e6996edce1bd847010347c723cc848e59c761d7274da70d30d61834b5 not found: ID does not exist"
Nov 28 11:36:21 crc kubenswrapper[4923]: I1128 11:36:21.180636 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0ee8723b-65f2-4105-b01c-cba9b345d69a" path="/var/lib/kubelet/pods/0ee8723b-65f2-4105-b01c-cba9b345d69a/volumes"
Nov 28 11:36:22 crc kubenswrapper[4923]: I1128 11:36:22.169144 4923 scope.go:117] "RemoveContainer" containerID="59e9391c4a472ec90ba5872638acc6cc579bc7ad3d795096b3c915356fd4186a"
Nov 28 11:36:22 crc kubenswrapper[4923]: E1128 11:36:22.169711 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bwdth_openshift-machine-config-operator(092566f7-fc7d-4897-a1f2-4ecedcd3058e)\"" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e"
Nov 28 11:36:35 crc kubenswrapper[4923]: I1128 11:36:35.169479 4923 scope.go:117] "RemoveContainer" containerID="59e9391c4a472ec90ba5872638acc6cc579bc7ad3d795096b3c915356fd4186a"
Nov 28 11:36:35 crc kubenswrapper[4923]: E1128 11:36:35.170318 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bwdth_openshift-machine-config-operator(092566f7-fc7d-4897-a1f2-4ecedcd3058e)\"" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e"
Nov 28 11:36:42 crc kubenswrapper[4923]: I1128 11:36:42.400432 4923 generic.go:334] "Generic (PLEG): container finished" podID="ff94fcb0-8bac-4b68-b732-c20bd131c50f" containerID="94cd1fb47438c94d7637e60275cd4d232f32fb465de06175eebc313c2d6e4c42" exitCode=0
Nov 28 11:36:42 crc kubenswrapper[4923]: I1128 11:36:42.400559 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-pnzvk" event={"ID":"ff94fcb0-8bac-4b68-b732-c20bd131c50f","Type":"ContainerDied","Data":"94cd1fb47438c94d7637e60275cd4d232f32fb465de06175eebc313c2d6e4c42"}
Nov 28 11:36:43 crc kubenswrapper[4923]: I1128 11:36:43.844442 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-pnzvk"
Nov 28 11:36:44 crc kubenswrapper[4923]: I1128 11:36:44.009227 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff94fcb0-8bac-4b68-b732-c20bd131c50f-bootstrap-combined-ca-bundle\") pod \"ff94fcb0-8bac-4b68-b732-c20bd131c50f\" (UID: \"ff94fcb0-8bac-4b68-b732-c20bd131c50f\") "
Nov 28 11:36:44 crc kubenswrapper[4923]: I1128 11:36:44.009719 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ff94fcb0-8bac-4b68-b732-c20bd131c50f-ssh-key\") pod \"ff94fcb0-8bac-4b68-b732-c20bd131c50f\" (UID: \"ff94fcb0-8bac-4b68-b732-c20bd131c50f\") "
Nov 28 11:36:44 crc kubenswrapper[4923]: I1128 11:36:44.009844 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ff94fcb0-8bac-4b68-b732-c20bd131c50f-inventory\") pod \"ff94fcb0-8bac-4b68-b732-c20bd131c50f\" (UID: \"ff94fcb0-8bac-4b68-b732-c20bd131c50f\") "
Nov 28 11:36:44 crc kubenswrapper[4923]: I1128 11:36:44.010041 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pbxcn\" (UniqueName: \"kubernetes.io/projected/ff94fcb0-8bac-4b68-b732-c20bd131c50f-kube-api-access-pbxcn\") pod \"ff94fcb0-8bac-4b68-b732-c20bd131c50f\" (UID: \"ff94fcb0-8bac-4b68-b732-c20bd131c50f\") "
Nov 28 11:36:44 crc kubenswrapper[4923]: I1128 11:36:44.024801 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ff94fcb0-8bac-4b68-b732-c20bd131c50f-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "ff94fcb0-8bac-4b68-b732-c20bd131c50f" (UID: "ff94fcb0-8bac-4b68-b732-c20bd131c50f"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 11:36:44 crc kubenswrapper[4923]: I1128 11:36:44.026611 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ff94fcb0-8bac-4b68-b732-c20bd131c50f-kube-api-access-pbxcn" (OuterVolumeSpecName: "kube-api-access-pbxcn") pod "ff94fcb0-8bac-4b68-b732-c20bd131c50f" (UID: "ff94fcb0-8bac-4b68-b732-c20bd131c50f"). InnerVolumeSpecName "kube-api-access-pbxcn". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 11:36:44 crc kubenswrapper[4923]: I1128 11:36:44.075074 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ff94fcb0-8bac-4b68-b732-c20bd131c50f-inventory" (OuterVolumeSpecName: "inventory") pod "ff94fcb0-8bac-4b68-b732-c20bd131c50f" (UID: "ff94fcb0-8bac-4b68-b732-c20bd131c50f"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 11:36:44 crc kubenswrapper[4923]: I1128 11:36:44.092456 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ff94fcb0-8bac-4b68-b732-c20bd131c50f-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "ff94fcb0-8bac-4b68-b732-c20bd131c50f" (UID: "ff94fcb0-8bac-4b68-b732-c20bd131c50f"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 11:36:44 crc kubenswrapper[4923]: I1128 11:36:44.115306 4923 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ff94fcb0-8bac-4b68-b732-c20bd131c50f-inventory\") on node \"crc\" DevicePath \"\""
Nov 28 11:36:44 crc kubenswrapper[4923]: I1128 11:36:44.115369 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pbxcn\" (UniqueName: \"kubernetes.io/projected/ff94fcb0-8bac-4b68-b732-c20bd131c50f-kube-api-access-pbxcn\") on node \"crc\" DevicePath \"\""
Nov 28 11:36:44 crc kubenswrapper[4923]: I1128 11:36:44.115402 4923 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff94fcb0-8bac-4b68-b732-c20bd131c50f-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 11:36:44 crc kubenswrapper[4923]: I1128 11:36:44.115427 4923 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ff94fcb0-8bac-4b68-b732-c20bd131c50f-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 28 11:36:44 crc kubenswrapper[4923]: I1128 11:36:44.423126 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-pnzvk" event={"ID":"ff94fcb0-8bac-4b68-b732-c20bd131c50f","Type":"ContainerDied","Data":"bfdfda388cf1d3cdd2b1dd7c04547fb1ba775fba3130c0e9aaf7c1b17da9890c"}
Nov 28 11:36:44 crc kubenswrapper[4923]: I1128 11:36:44.423161 4923 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bfdfda388cf1d3cdd2b1dd7c04547fb1ba775fba3130c0e9aaf7c1b17da9890c"
Nov 28 11:36:44 crc kubenswrapper[4923]: I1128 11:36:44.423240 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-pnzvk" Nov 28 11:36:44 crc kubenswrapper[4923]: I1128 11:36:44.542866 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-lxjfl"] Nov 28 11:36:44 crc kubenswrapper[4923]: E1128 11:36:44.543331 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0ee8723b-65f2-4105-b01c-cba9b345d69a" containerName="extract-content" Nov 28 11:36:44 crc kubenswrapper[4923]: I1128 11:36:44.543353 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="0ee8723b-65f2-4105-b01c-cba9b345d69a" containerName="extract-content" Nov 28 11:36:44 crc kubenswrapper[4923]: E1128 11:36:44.543376 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0ee8723b-65f2-4105-b01c-cba9b345d69a" containerName="registry-server" Nov 28 11:36:44 crc kubenswrapper[4923]: I1128 11:36:44.543385 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="0ee8723b-65f2-4105-b01c-cba9b345d69a" containerName="registry-server" Nov 28 11:36:44 crc kubenswrapper[4923]: E1128 11:36:44.543413 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0ee8723b-65f2-4105-b01c-cba9b345d69a" containerName="extract-utilities" Nov 28 11:36:44 crc kubenswrapper[4923]: I1128 11:36:44.543423 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="0ee8723b-65f2-4105-b01c-cba9b345d69a" containerName="extract-utilities" Nov 28 11:36:44 crc kubenswrapper[4923]: E1128 11:36:44.543436 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff94fcb0-8bac-4b68-b732-c20bd131c50f" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Nov 28 11:36:44 crc kubenswrapper[4923]: I1128 11:36:44.543445 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff94fcb0-8bac-4b68-b732-c20bd131c50f" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Nov 28 11:36:44 crc kubenswrapper[4923]: I1128 11:36:44.543654 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="0ee8723b-65f2-4105-b01c-cba9b345d69a" containerName="registry-server" Nov 28 11:36:44 crc kubenswrapper[4923]: I1128 11:36:44.543682 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="ff94fcb0-8bac-4b68-b732-c20bd131c50f" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Nov 28 11:36:44 crc kubenswrapper[4923]: I1128 11:36:44.544450 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-lxjfl" Nov 28 11:36:44 crc kubenswrapper[4923]: I1128 11:36:44.547784 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 11:36:44 crc kubenswrapper[4923]: I1128 11:36:44.549092 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 28 11:36:44 crc kubenswrapper[4923]: I1128 11:36:44.549842 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-2xnkl" Nov 28 11:36:44 crc kubenswrapper[4923]: I1128 11:36:44.551198 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 28 11:36:44 crc kubenswrapper[4923]: I1128 11:36:44.555864 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-lxjfl"] Nov 28 11:36:44 crc kubenswrapper[4923]: I1128 11:36:44.624354 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3063c09c-4f2e-4fdb-b9bc-302e69185203-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-lxjfl\" (UID: \"3063c09c-4f2e-4fdb-b9bc-302e69185203\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-lxjfl" Nov 28 11:36:44 crc kubenswrapper[4923]: I1128 11:36:44.624430 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3063c09c-4f2e-4fdb-b9bc-302e69185203-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-lxjfl\" (UID: \"3063c09c-4f2e-4fdb-b9bc-302e69185203\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-lxjfl" Nov 28 11:36:44 crc kubenswrapper[4923]: I1128 11:36:44.624456 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wldfz\" (UniqueName: \"kubernetes.io/projected/3063c09c-4f2e-4fdb-b9bc-302e69185203-kube-api-access-wldfz\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-lxjfl\" (UID: \"3063c09c-4f2e-4fdb-b9bc-302e69185203\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-lxjfl" Nov 28 11:36:44 crc kubenswrapper[4923]: I1128 11:36:44.726415 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3063c09c-4f2e-4fdb-b9bc-302e69185203-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-lxjfl\" (UID: \"3063c09c-4f2e-4fdb-b9bc-302e69185203\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-lxjfl" Nov 28 11:36:44 crc kubenswrapper[4923]: I1128 11:36:44.726495 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3063c09c-4f2e-4fdb-b9bc-302e69185203-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-lxjfl\" (UID: \"3063c09c-4f2e-4fdb-b9bc-302e69185203\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-lxjfl" Nov 28 11:36:44 crc kubenswrapper[4923]: I1128 11:36:44.726525 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wldfz\" (UniqueName: \"kubernetes.io/projected/3063c09c-4f2e-4fdb-b9bc-302e69185203-kube-api-access-wldfz\") 
pod \"configure-network-edpm-deployment-openstack-edpm-ipam-lxjfl\" (UID: \"3063c09c-4f2e-4fdb-b9bc-302e69185203\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-lxjfl" Nov 28 11:36:44 crc kubenswrapper[4923]: I1128 11:36:44.732173 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3063c09c-4f2e-4fdb-b9bc-302e69185203-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-lxjfl\" (UID: \"3063c09c-4f2e-4fdb-b9bc-302e69185203\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-lxjfl" Nov 28 11:36:44 crc kubenswrapper[4923]: I1128 11:36:44.735316 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3063c09c-4f2e-4fdb-b9bc-302e69185203-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-lxjfl\" (UID: \"3063c09c-4f2e-4fdb-b9bc-302e69185203\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-lxjfl" Nov 28 11:36:44 crc kubenswrapper[4923]: I1128 11:36:44.743419 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wldfz\" (UniqueName: \"kubernetes.io/projected/3063c09c-4f2e-4fdb-b9bc-302e69185203-kube-api-access-wldfz\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-lxjfl\" (UID: \"3063c09c-4f2e-4fdb-b9bc-302e69185203\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-lxjfl" Nov 28 11:36:44 crc kubenswrapper[4923]: I1128 11:36:44.859994 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-lxjfl" Nov 28 11:36:45 crc kubenswrapper[4923]: I1128 11:36:45.694471 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-lxjfl"] Nov 28 11:36:46 crc kubenswrapper[4923]: I1128 11:36:46.447856 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-lxjfl" event={"ID":"3063c09c-4f2e-4fdb-b9bc-302e69185203","Type":"ContainerStarted","Data":"5c2b4b37979c8e8beb9080270b7f870247eb48fc68990f4ed340344a6a472018"} Nov 28 11:36:47 crc kubenswrapper[4923]: I1128 11:36:47.458893 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-lxjfl" event={"ID":"3063c09c-4f2e-4fdb-b9bc-302e69185203","Type":"ContainerStarted","Data":"0be6c6a2ccb32bba97093a960e1d374882e416d8e37cede04cc4e76c7151a101"} Nov 28 11:36:47 crc kubenswrapper[4923]: I1128 11:36:47.490229 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-lxjfl" podStartSLOduration=2.8476168 podStartE2EDuration="3.490201343s" podCreationTimestamp="2025-11-28 11:36:44 +0000 UTC" firstStartedPulling="2025-11-28 11:36:45.710479076 +0000 UTC m=+1684.839163326" lastFinishedPulling="2025-11-28 11:36:46.353063659 +0000 UTC m=+1685.481747869" observedRunningTime="2025-11-28 11:36:47.477207584 +0000 UTC m=+1686.605891824" watchObservedRunningTime="2025-11-28 11:36:47.490201343 +0000 UTC m=+1686.618885563" Nov 28 11:36:48 crc kubenswrapper[4923]: I1128 11:36:48.169219 4923 scope.go:117] "RemoveContainer" containerID="59e9391c4a472ec90ba5872638acc6cc579bc7ad3d795096b3c915356fd4186a" Nov 28 11:36:48 crc kubenswrapper[4923]: E1128 11:36:48.169740 4923 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bwdth_openshift-machine-config-operator(092566f7-fc7d-4897-a1f2-4ecedcd3058e)\"" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" Nov 28 11:36:59 crc kubenswrapper[4923]: I1128 11:36:59.170804 4923 scope.go:117] "RemoveContainer" containerID="59e9391c4a472ec90ba5872638acc6cc579bc7ad3d795096b3c915356fd4186a" Nov 28 11:36:59 crc kubenswrapper[4923]: E1128 11:36:59.171830 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bwdth_openshift-machine-config-operator(092566f7-fc7d-4897-a1f2-4ecedcd3058e)\"" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" Nov 28 11:37:00 crc kubenswrapper[4923]: I1128 11:37:00.101785 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-13ae-account-create-update-5xj8w"] Nov 28 11:37:00 crc kubenswrapper[4923]: I1128 11:37:00.112906 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-8dbh7"] Nov 28 11:37:00 crc kubenswrapper[4923]: I1128 11:37:00.125886 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-13ae-account-create-update-5xj8w"] Nov 28 11:37:00 crc kubenswrapper[4923]: I1128 11:37:00.137900 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-8dbh7"] Nov 28 11:37:01 crc kubenswrapper[4923]: I1128 11:37:01.181081 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c2a257ac-b03d-4c96-b2e5-bd306b02b489" path="/var/lib/kubelet/pods/c2a257ac-b03d-4c96-b2e5-bd306b02b489/volumes" Nov 28 11:37:01 crc kubenswrapper[4923]: I1128 11:37:01.182213 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ebc90f13-4ffb-450c-a5ca-16053a5111d5" path="/var/lib/kubelet/pods/ebc90f13-4ffb-450c-a5ca-16053a5111d5/volumes" Nov 28 11:37:04 crc kubenswrapper[4923]: I1128 11:37:04.042237 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-hjzh6"] Nov 28 11:37:04 crc kubenswrapper[4923]: I1128 11:37:04.058511 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-hjzh6"] Nov 28 11:37:05 crc kubenswrapper[4923]: I1128 11:37:05.088394 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-30e6-account-create-update-m9w5t"] Nov 28 11:37:05 crc kubenswrapper[4923]: I1128 11:37:05.103957 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-x7nxd"] Nov 28 11:37:05 crc kubenswrapper[4923]: I1128 11:37:05.113559 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-30e6-account-create-update-m9w5t"] Nov 28 11:37:05 crc kubenswrapper[4923]: I1128 11:37:05.124928 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-x7nxd"] Nov 28 11:37:05 crc kubenswrapper[4923]: I1128 11:37:05.134834 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-0809-account-create-update-vkdf5"] Nov 28 11:37:05 crc kubenswrapper[4923]: I1128 11:37:05.144657 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
pods=["openstack/keystone-0809-account-create-update-vkdf5"] Nov 28 11:37:05 crc kubenswrapper[4923]: I1128 11:37:05.182773 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3d15e393-6a58-4f86-8109-27c62533f866" path="/var/lib/kubelet/pods/3d15e393-6a58-4f86-8109-27c62533f866/volumes" Nov 28 11:37:05 crc kubenswrapper[4923]: I1128 11:37:05.183768 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9c1fa907-042f-413b-abcc-4e6fbf23b382" path="/var/lib/kubelet/pods/9c1fa907-042f-413b-abcc-4e6fbf23b382/volumes" Nov 28 11:37:05 crc kubenswrapper[4923]: I1128 11:37:05.184804 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bb2c6ef9-4d96-46a8-b4da-1e2fa56ba3eb" path="/var/lib/kubelet/pods/bb2c6ef9-4d96-46a8-b4da-1e2fa56ba3eb/volumes" Nov 28 11:37:05 crc kubenswrapper[4923]: I1128 11:37:05.185760 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f6529789-5374-4f6a-89a8-338ebb535753" path="/var/lib/kubelet/pods/f6529789-5374-4f6a-89a8-338ebb535753/volumes" Nov 28 11:37:05 crc kubenswrapper[4923]: I1128 11:37:05.789913 4923 scope.go:117] "RemoveContainer" containerID="0295fe043258bff02eb8d18a771bf4651077844dff9705f5b1afd78596678f24" Nov 28 11:37:05 crc kubenswrapper[4923]: I1128 11:37:05.825702 4923 scope.go:117] "RemoveContainer" containerID="7d91c376b62b075124ec2ede9e3d9d245aaad8f876ab72f4f2caaa9b6d42f539" Nov 28 11:37:05 crc kubenswrapper[4923]: I1128 11:37:05.895215 4923 scope.go:117] "RemoveContainer" containerID="ac8c39960114ff71653053529fd81fb7e45cdced6ca55c468ed1a62d1925e3b8" Nov 28 11:37:05 crc kubenswrapper[4923]: I1128 11:37:05.933519 4923 scope.go:117] "RemoveContainer" containerID="fea46f7f844f2d88fa6a0767377d40b520777029abf99fbe689f42f3efe54b89" Nov 28 11:37:05 crc kubenswrapper[4923]: I1128 11:37:05.976524 4923 scope.go:117] "RemoveContainer" containerID="787dacc52b2fba47783ed7379ed429d7ffefb82825deec4d4e33fe634fc30f6c" Nov 28 11:37:06 crc kubenswrapper[4923]: I1128 11:37:06.017857 4923 scope.go:117] "RemoveContainer" containerID="f2c9315382360f2405010c5b7588c9e9f094c9dbe4ebab29b449b0c669e4199d" Nov 28 11:37:10 crc kubenswrapper[4923]: I1128 11:37:10.168633 4923 scope.go:117] "RemoveContainer" containerID="59e9391c4a472ec90ba5872638acc6cc579bc7ad3d795096b3c915356fd4186a" Nov 28 11:37:10 crc kubenswrapper[4923]: E1128 11:37:10.169745 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bwdth_openshift-machine-config-operator(092566f7-fc7d-4897-a1f2-4ecedcd3058e)\"" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" Nov 28 11:37:23 crc kubenswrapper[4923]: I1128 11:37:23.169033 4923 scope.go:117] "RemoveContainer" containerID="59e9391c4a472ec90ba5872638acc6cc579bc7ad3d795096b3c915356fd4186a" Nov 28 11:37:23 crc kubenswrapper[4923]: E1128 11:37:23.169628 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bwdth_openshift-machine-config-operator(092566f7-fc7d-4897-a1f2-4ecedcd3058e)\"" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" Nov 28 11:37:28 crc kubenswrapper[4923]: I1128 11:37:28.052097 4923 
kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-bdtml"] Nov 28 11:37:28 crc kubenswrapper[4923]: I1128 11:37:28.063875 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-bdtml"] Nov 28 11:37:29 crc kubenswrapper[4923]: I1128 11:37:29.183923 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d575e6ab-598e-4f0d-b33a-60515d1e8d21" path="/var/lib/kubelet/pods/d575e6ab-598e-4f0d-b33a-60515d1e8d21/volumes" Nov 28 11:37:32 crc kubenswrapper[4923]: I1128 11:37:32.044079 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-hzc9c"] Nov 28 11:37:32 crc kubenswrapper[4923]: I1128 11:37:32.055343 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-hzc9c"] Nov 28 11:37:33 crc kubenswrapper[4923]: I1128 11:37:33.045013 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-4cc8-account-create-update-xwrmh"] Nov 28 11:37:33 crc kubenswrapper[4923]: I1128 11:37:33.059444 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-4cc8-account-create-update-xwrmh"] Nov 28 11:37:33 crc kubenswrapper[4923]: I1128 11:37:33.075381 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-kln9b"] Nov 28 11:37:33 crc kubenswrapper[4923]: I1128 11:37:33.093411 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-gtjhb"] Nov 28 11:37:33 crc kubenswrapper[4923]: I1128 11:37:33.100469 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-e24f-account-create-update-72bgf"] Nov 28 11:37:33 crc kubenswrapper[4923]: I1128 11:37:33.107346 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-kln9b"] Nov 28 11:37:33 crc kubenswrapper[4923]: I1128 11:37:33.115528 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-gtjhb"] Nov 28 11:37:33 crc kubenswrapper[4923]: I1128 11:37:33.123854 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-e24f-account-create-update-72bgf"] Nov 28 11:37:33 crc kubenswrapper[4923]: I1128 11:37:33.182648 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5a5b5ef2-d0c3-4cee-ae31-3a4d74171b68" path="/var/lib/kubelet/pods/5a5b5ef2-d0c3-4cee-ae31-3a4d74171b68/volumes" Nov 28 11:37:33 crc kubenswrapper[4923]: I1128 11:37:33.183682 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="68243bc7-2ab3-4632-9ff1-d3af61a0acb3" path="/var/lib/kubelet/pods/68243bc7-2ab3-4632-9ff1-d3af61a0acb3/volumes" Nov 28 11:37:33 crc kubenswrapper[4923]: I1128 11:37:33.184710 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="856d0fb6-7167-4553-b626-aaa75d43f5ab" path="/var/lib/kubelet/pods/856d0fb6-7167-4553-b626-aaa75d43f5ab/volumes" Nov 28 11:37:33 crc kubenswrapper[4923]: I1128 11:37:33.186119 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc24e28a-d913-47b0-a352-9962063ffedf" path="/var/lib/kubelet/pods/bc24e28a-d913-47b0-a352-9962063ffedf/volumes" Nov 28 11:37:33 crc kubenswrapper[4923]: I1128 11:37:33.188283 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fa707c0f-6bdb-4597-b001-d457323f04c1" path="/var/lib/kubelet/pods/fa707c0f-6bdb-4597-b001-d457323f04c1/volumes" Nov 28 11:37:35 crc kubenswrapper[4923]: I1128 11:37:35.170081 4923 scope.go:117] "RemoveContainer" 
containerID="59e9391c4a472ec90ba5872638acc6cc579bc7ad3d795096b3c915356fd4186a" Nov 28 11:37:35 crc kubenswrapper[4923]: E1128 11:37:35.170501 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bwdth_openshift-machine-config-operator(092566f7-fc7d-4897-a1f2-4ecedcd3058e)\"" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" Nov 28 11:37:36 crc kubenswrapper[4923]: I1128 11:37:36.048968 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-278d-account-create-update-7ws6f"] Nov 28 11:37:36 crc kubenswrapper[4923]: I1128 11:37:36.059824 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-278d-account-create-update-7ws6f"] Nov 28 11:37:37 crc kubenswrapper[4923]: I1128 11:37:37.190254 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="06f81fa3-fcd1-4f4d-9dbe-fe659371c477" path="/var/lib/kubelet/pods/06f81fa3-fcd1-4f4d-9dbe-fe659371c477/volumes" Nov 28 11:37:40 crc kubenswrapper[4923]: I1128 11:37:40.051874 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-qfdbm"] Nov 28 11:37:40 crc kubenswrapper[4923]: I1128 11:37:40.067738 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-qfdbm"] Nov 28 11:37:41 crc kubenswrapper[4923]: I1128 11:37:41.183388 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9fca30b0-b933-4526-9006-e477a86836a6" path="/var/lib/kubelet/pods/9fca30b0-b933-4526-9006-e477a86836a6/volumes" Nov 28 11:37:46 crc kubenswrapper[4923]: I1128 11:37:46.169275 4923 scope.go:117] "RemoveContainer" containerID="59e9391c4a472ec90ba5872638acc6cc579bc7ad3d795096b3c915356fd4186a" Nov 28 11:37:46 crc kubenswrapper[4923]: E1128 11:37:46.169733 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bwdth_openshift-machine-config-operator(092566f7-fc7d-4897-a1f2-4ecedcd3058e)\"" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" Nov 28 11:38:01 crc kubenswrapper[4923]: I1128 11:38:01.182161 4923 scope.go:117] "RemoveContainer" containerID="59e9391c4a472ec90ba5872638acc6cc579bc7ad3d795096b3c915356fd4186a" Nov 28 11:38:01 crc kubenswrapper[4923]: E1128 11:38:01.183718 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bwdth_openshift-machine-config-operator(092566f7-fc7d-4897-a1f2-4ecedcd3058e)\"" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" Nov 28 11:38:05 crc kubenswrapper[4923]: I1128 11:38:05.044974 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-zvrbl"] Nov 28 11:38:05 crc kubenswrapper[4923]: I1128 11:38:05.054123 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-zvrbl"] Nov 28 11:38:05 crc kubenswrapper[4923]: I1128 11:38:05.197480 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="ab7a5c22-b1d8-49e8-9420-25485e5dabd7" path="/var/lib/kubelet/pods/ab7a5c22-b1d8-49e8-9420-25485e5dabd7/volumes" Nov 28 11:38:06 crc kubenswrapper[4923]: I1128 11:38:06.172505 4923 scope.go:117] "RemoveContainer" containerID="ec27bbb8d3db358884501b5303609ad0fbe631169d5b05fb5f24743f537863cf" Nov 28 11:38:06 crc kubenswrapper[4923]: I1128 11:38:06.212567 4923 scope.go:117] "RemoveContainer" containerID="d631b6e7278073e1c01f185892c7cc951eeb27d682c41809be4d3a9ba5ef9f10" Nov 28 11:38:06 crc kubenswrapper[4923]: I1128 11:38:06.254408 4923 scope.go:117] "RemoveContainer" containerID="5c8d3a4a6e04a1023fcfdf085467d3bb970d8858f0760117b0a68a45115cc41d" Nov 28 11:38:06 crc kubenswrapper[4923]: I1128 11:38:06.306939 4923 scope.go:117] "RemoveContainer" containerID="93328e0abc8e779b2ab8901ca90188b416c2faa3cbfab4624a1f1142abb42572" Nov 28 11:38:06 crc kubenswrapper[4923]: I1128 11:38:06.809710 4923 scope.go:117] "RemoveContainer" containerID="44aa867d35691dce4770d65b122763fa59ba6faa225d1ce23188f9822c30a7ed" Nov 28 11:38:06 crc kubenswrapper[4923]: I1128 11:38:06.830309 4923 scope.go:117] "RemoveContainer" containerID="f984ab706c058be498ca66cd9c54263456756fc7cc08a8f2c8ff7d4ca754447a" Nov 28 11:38:06 crc kubenswrapper[4923]: I1128 11:38:06.880720 4923 scope.go:117] "RemoveContainer" containerID="2c8398455971b245c9ed814839cce39984693ffe4db72ead897289a105230ce3" Nov 28 11:38:06 crc kubenswrapper[4923]: I1128 11:38:06.907342 4923 scope.go:117] "RemoveContainer" containerID="00598b0cdb16553692679fc804bb9418856adaf21da948b49b3df0a6881092a8" Nov 28 11:38:06 crc kubenswrapper[4923]: I1128 11:38:06.946321 4923 scope.go:117] "RemoveContainer" containerID="f3490ef1976735489e353012e64ae07dfbc4a1a1bc13fce7e22fade279ae7ad1" Nov 28 11:38:10 crc kubenswrapper[4923]: I1128 11:38:10.059069 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-g6z8k"] Nov 28 11:38:10 crc kubenswrapper[4923]: I1128 11:38:10.070359 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-g6z8k"] Nov 28 11:38:10 crc kubenswrapper[4923]: I1128 11:38:10.410552 4923 generic.go:334] "Generic (PLEG): container finished" podID="3063c09c-4f2e-4fdb-b9bc-302e69185203" containerID="0be6c6a2ccb32bba97093a960e1d374882e416d8e37cede04cc4e76c7151a101" exitCode=0 Nov 28 11:38:10 crc kubenswrapper[4923]: I1128 11:38:10.410594 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-lxjfl" event={"ID":"3063c09c-4f2e-4fdb-b9bc-302e69185203","Type":"ContainerDied","Data":"0be6c6a2ccb32bba97093a960e1d374882e416d8e37cede04cc4e76c7151a101"} Nov 28 11:38:11 crc kubenswrapper[4923]: I1128 11:38:11.189106 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6803cf35-bd54-4b83-a5b1-42cae252f98d" path="/var/lib/kubelet/pods/6803cf35-bd54-4b83-a5b1-42cae252f98d/volumes" Nov 28 11:38:11 crc kubenswrapper[4923]: I1128 11:38:11.954972 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-lxjfl" Nov 28 11:38:12 crc kubenswrapper[4923]: I1128 11:38:12.063116 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3063c09c-4f2e-4fdb-b9bc-302e69185203-inventory\") pod \"3063c09c-4f2e-4fdb-b9bc-302e69185203\" (UID: \"3063c09c-4f2e-4fdb-b9bc-302e69185203\") " Nov 28 11:38:12 crc kubenswrapper[4923]: I1128 11:38:12.063172 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3063c09c-4f2e-4fdb-b9bc-302e69185203-ssh-key\") pod \"3063c09c-4f2e-4fdb-b9bc-302e69185203\" (UID: \"3063c09c-4f2e-4fdb-b9bc-302e69185203\") " Nov 28 11:38:12 crc kubenswrapper[4923]: I1128 11:38:12.063206 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wldfz\" (UniqueName: \"kubernetes.io/projected/3063c09c-4f2e-4fdb-b9bc-302e69185203-kube-api-access-wldfz\") pod \"3063c09c-4f2e-4fdb-b9bc-302e69185203\" (UID: \"3063c09c-4f2e-4fdb-b9bc-302e69185203\") " Nov 28 11:38:12 crc kubenswrapper[4923]: I1128 11:38:12.073204 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3063c09c-4f2e-4fdb-b9bc-302e69185203-kube-api-access-wldfz" (OuterVolumeSpecName: "kube-api-access-wldfz") pod "3063c09c-4f2e-4fdb-b9bc-302e69185203" (UID: "3063c09c-4f2e-4fdb-b9bc-302e69185203"). InnerVolumeSpecName "kube-api-access-wldfz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:38:12 crc kubenswrapper[4923]: I1128 11:38:12.094674 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3063c09c-4f2e-4fdb-b9bc-302e69185203-inventory" (OuterVolumeSpecName: "inventory") pod "3063c09c-4f2e-4fdb-b9bc-302e69185203" (UID: "3063c09c-4f2e-4fdb-b9bc-302e69185203"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:38:12 crc kubenswrapper[4923]: I1128 11:38:12.095172 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3063c09c-4f2e-4fdb-b9bc-302e69185203-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "3063c09c-4f2e-4fdb-b9bc-302e69185203" (UID: "3063c09c-4f2e-4fdb-b9bc-302e69185203"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:38:12 crc kubenswrapper[4923]: I1128 11:38:12.164905 4923 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3063c09c-4f2e-4fdb-b9bc-302e69185203-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 11:38:12 crc kubenswrapper[4923]: I1128 11:38:12.164950 4923 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3063c09c-4f2e-4fdb-b9bc-302e69185203-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 11:38:12 crc kubenswrapper[4923]: I1128 11:38:12.164963 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wldfz\" (UniqueName: \"kubernetes.io/projected/3063c09c-4f2e-4fdb-b9bc-302e69185203-kube-api-access-wldfz\") on node \"crc\" DevicePath \"\"" Nov 28 11:38:12 crc kubenswrapper[4923]: I1128 11:38:12.444499 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-lxjfl" event={"ID":"3063c09c-4f2e-4fdb-b9bc-302e69185203","Type":"ContainerDied","Data":"5c2b4b37979c8e8beb9080270b7f870247eb48fc68990f4ed340344a6a472018"} Nov 28 11:38:12 crc kubenswrapper[4923]: I1128 11:38:12.444960 4923 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5c2b4b37979c8e8beb9080270b7f870247eb48fc68990f4ed340344a6a472018" Nov 28 11:38:12 crc kubenswrapper[4923]: I1128 11:38:12.444589 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-lxjfl" Nov 28 11:38:12 crc kubenswrapper[4923]: I1128 11:38:12.641304 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-4dhrm"] Nov 28 11:38:12 crc kubenswrapper[4923]: E1128 11:38:12.641991 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3063c09c-4f2e-4fdb-b9bc-302e69185203" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 28 11:38:12 crc kubenswrapper[4923]: I1128 11:38:12.642094 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="3063c09c-4f2e-4fdb-b9bc-302e69185203" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 28 11:38:12 crc kubenswrapper[4923]: I1128 11:38:12.642422 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="3063c09c-4f2e-4fdb-b9bc-302e69185203" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 28 11:38:12 crc kubenswrapper[4923]: I1128 11:38:12.643400 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-4dhrm" Nov 28 11:38:12 crc kubenswrapper[4923]: I1128 11:38:12.648439 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-2xnkl" Nov 28 11:38:12 crc kubenswrapper[4923]: I1128 11:38:12.648823 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 28 11:38:12 crc kubenswrapper[4923]: I1128 11:38:12.649064 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 11:38:12 crc kubenswrapper[4923]: I1128 11:38:12.650056 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 28 11:38:12 crc kubenswrapper[4923]: I1128 11:38:12.653223 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-4dhrm"] Nov 28 11:38:12 crc kubenswrapper[4923]: I1128 11:38:12.780905 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6hhjh\" (UniqueName: \"kubernetes.io/projected/f7e0da2f-9257-45b6-be10-0e7c9daa73ab-kube-api-access-6hhjh\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-4dhrm\" (UID: \"f7e0da2f-9257-45b6-be10-0e7c9daa73ab\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-4dhrm" Nov 28 11:38:12 crc kubenswrapper[4923]: I1128 11:38:12.781017 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f7e0da2f-9257-45b6-be10-0e7c9daa73ab-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-4dhrm\" (UID: \"f7e0da2f-9257-45b6-be10-0e7c9daa73ab\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-4dhrm" Nov 28 11:38:12 crc kubenswrapper[4923]: I1128 11:38:12.781150 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f7e0da2f-9257-45b6-be10-0e7c9daa73ab-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-4dhrm\" (UID: \"f7e0da2f-9257-45b6-be10-0e7c9daa73ab\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-4dhrm" Nov 28 11:38:12 crc kubenswrapper[4923]: I1128 11:38:12.883702 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f7e0da2f-9257-45b6-be10-0e7c9daa73ab-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-4dhrm\" (UID: \"f7e0da2f-9257-45b6-be10-0e7c9daa73ab\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-4dhrm" Nov 28 11:38:12 crc kubenswrapper[4923]: I1128 11:38:12.883769 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f7e0da2f-9257-45b6-be10-0e7c9daa73ab-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-4dhrm\" (UID: \"f7e0da2f-9257-45b6-be10-0e7c9daa73ab\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-4dhrm" Nov 28 11:38:12 crc kubenswrapper[4923]: I1128 11:38:12.883887 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6hhjh\" (UniqueName: \"kubernetes.io/projected/f7e0da2f-9257-45b6-be10-0e7c9daa73ab-kube-api-access-6hhjh\") pod 
\"validate-network-edpm-deployment-openstack-edpm-ipam-4dhrm\" (UID: \"f7e0da2f-9257-45b6-be10-0e7c9daa73ab\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-4dhrm" Nov 28 11:38:12 crc kubenswrapper[4923]: I1128 11:38:12.896046 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f7e0da2f-9257-45b6-be10-0e7c9daa73ab-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-4dhrm\" (UID: \"f7e0da2f-9257-45b6-be10-0e7c9daa73ab\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-4dhrm" Nov 28 11:38:12 crc kubenswrapper[4923]: I1128 11:38:12.900818 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f7e0da2f-9257-45b6-be10-0e7c9daa73ab-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-4dhrm\" (UID: \"f7e0da2f-9257-45b6-be10-0e7c9daa73ab\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-4dhrm" Nov 28 11:38:12 crc kubenswrapper[4923]: I1128 11:38:12.905030 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6hhjh\" (UniqueName: \"kubernetes.io/projected/f7e0da2f-9257-45b6-be10-0e7c9daa73ab-kube-api-access-6hhjh\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-4dhrm\" (UID: \"f7e0da2f-9257-45b6-be10-0e7c9daa73ab\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-4dhrm" Nov 28 11:38:12 crc kubenswrapper[4923]: I1128 11:38:12.984700 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-4dhrm" Nov 28 11:38:13 crc kubenswrapper[4923]: I1128 11:38:13.583002 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-4dhrm"] Nov 28 11:38:14 crc kubenswrapper[4923]: I1128 11:38:14.168367 4923 scope.go:117] "RemoveContainer" containerID="59e9391c4a472ec90ba5872638acc6cc579bc7ad3d795096b3c915356fd4186a" Nov 28 11:38:14 crc kubenswrapper[4923]: E1128 11:38:14.169055 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bwdth_openshift-machine-config-operator(092566f7-fc7d-4897-a1f2-4ecedcd3058e)\"" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" Nov 28 11:38:14 crc kubenswrapper[4923]: I1128 11:38:14.472854 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-4dhrm" event={"ID":"f7e0da2f-9257-45b6-be10-0e7c9daa73ab","Type":"ContainerStarted","Data":"6d8d462aa48228f279eee7fe822e66ff5b3eb4f05e3f71067412e6ca3b25a31a"} Nov 28 11:38:15 crc kubenswrapper[4923]: I1128 11:38:15.485518 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-4dhrm" event={"ID":"f7e0da2f-9257-45b6-be10-0e7c9daa73ab","Type":"ContainerStarted","Data":"6a9dfc7741a263970657e50beec64ecd82b19f4515b1f93ad72eb8a538463264"} Nov 28 11:38:15 crc kubenswrapper[4923]: I1128 11:38:15.583331 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-4dhrm" podStartSLOduration=3.093061457 podStartE2EDuration="3.583309905s" 
podCreationTimestamp="2025-11-28 11:38:12 +0000 UTC" firstStartedPulling="2025-11-28 11:38:13.597722684 +0000 UTC m=+1772.726406894" lastFinishedPulling="2025-11-28 11:38:14.087971122 +0000 UTC m=+1773.216655342" observedRunningTime="2025-11-28 11:38:15.577770528 +0000 UTC m=+1774.706454768" watchObservedRunningTime="2025-11-28 11:38:15.583309905 +0000 UTC m=+1774.711994125" Nov 28 11:38:18 crc kubenswrapper[4923]: I1128 11:38:18.046304 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-dfhlh"] Nov 28 11:38:18 crc kubenswrapper[4923]: I1128 11:38:18.062091 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-dfhlh"] Nov 28 11:38:19 crc kubenswrapper[4923]: I1128 11:38:19.187293 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4173396e-acf1-469e-9c63-4a02a2a1692b" path="/var/lib/kubelet/pods/4173396e-acf1-469e-9c63-4a02a2a1692b/volumes" Nov 28 11:38:22 crc kubenswrapper[4923]: I1128 11:38:22.553495 4923 generic.go:334] "Generic (PLEG): container finished" podID="f7e0da2f-9257-45b6-be10-0e7c9daa73ab" containerID="6a9dfc7741a263970657e50beec64ecd82b19f4515b1f93ad72eb8a538463264" exitCode=0 Nov 28 11:38:22 crc kubenswrapper[4923]: I1128 11:38:22.553581 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-4dhrm" event={"ID":"f7e0da2f-9257-45b6-be10-0e7c9daa73ab","Type":"ContainerDied","Data":"6a9dfc7741a263970657e50beec64ecd82b19f4515b1f93ad72eb8a538463264"} Nov 28 11:38:23 crc kubenswrapper[4923]: I1128 11:38:23.999081 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-4dhrm" Nov 28 11:38:24 crc kubenswrapper[4923]: I1128 11:38:24.161671 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6hhjh\" (UniqueName: \"kubernetes.io/projected/f7e0da2f-9257-45b6-be10-0e7c9daa73ab-kube-api-access-6hhjh\") pod \"f7e0da2f-9257-45b6-be10-0e7c9daa73ab\" (UID: \"f7e0da2f-9257-45b6-be10-0e7c9daa73ab\") " Nov 28 11:38:24 crc kubenswrapper[4923]: I1128 11:38:24.162210 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f7e0da2f-9257-45b6-be10-0e7c9daa73ab-ssh-key\") pod \"f7e0da2f-9257-45b6-be10-0e7c9daa73ab\" (UID: \"f7e0da2f-9257-45b6-be10-0e7c9daa73ab\") " Nov 28 11:38:24 crc kubenswrapper[4923]: I1128 11:38:24.162259 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f7e0da2f-9257-45b6-be10-0e7c9daa73ab-inventory\") pod \"f7e0da2f-9257-45b6-be10-0e7c9daa73ab\" (UID: \"f7e0da2f-9257-45b6-be10-0e7c9daa73ab\") " Nov 28 11:38:24 crc kubenswrapper[4923]: I1128 11:38:24.174968 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f7e0da2f-9257-45b6-be10-0e7c9daa73ab-kube-api-access-6hhjh" (OuterVolumeSpecName: "kube-api-access-6hhjh") pod "f7e0da2f-9257-45b6-be10-0e7c9daa73ab" (UID: "f7e0da2f-9257-45b6-be10-0e7c9daa73ab"). InnerVolumeSpecName "kube-api-access-6hhjh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:38:24 crc kubenswrapper[4923]: I1128 11:38:24.193993 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f7e0da2f-9257-45b6-be10-0e7c9daa73ab-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "f7e0da2f-9257-45b6-be10-0e7c9daa73ab" (UID: "f7e0da2f-9257-45b6-be10-0e7c9daa73ab"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:38:24 crc kubenswrapper[4923]: I1128 11:38:24.214067 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f7e0da2f-9257-45b6-be10-0e7c9daa73ab-inventory" (OuterVolumeSpecName: "inventory") pod "f7e0da2f-9257-45b6-be10-0e7c9daa73ab" (UID: "f7e0da2f-9257-45b6-be10-0e7c9daa73ab"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:38:24 crc kubenswrapper[4923]: I1128 11:38:24.264686 4923 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f7e0da2f-9257-45b6-be10-0e7c9daa73ab-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 11:38:24 crc kubenswrapper[4923]: I1128 11:38:24.265157 4923 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f7e0da2f-9257-45b6-be10-0e7c9daa73ab-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 11:38:24 crc kubenswrapper[4923]: I1128 11:38:24.265391 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6hhjh\" (UniqueName: \"kubernetes.io/projected/f7e0da2f-9257-45b6-be10-0e7c9daa73ab-kube-api-access-6hhjh\") on node \"crc\" DevicePath \"\"" Nov 28 11:38:24 crc kubenswrapper[4923]: I1128 11:38:24.581209 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-4dhrm" event={"ID":"f7e0da2f-9257-45b6-be10-0e7c9daa73ab","Type":"ContainerDied","Data":"6d8d462aa48228f279eee7fe822e66ff5b3eb4f05e3f71067412e6ca3b25a31a"} Nov 28 11:38:24 crc kubenswrapper[4923]: I1128 11:38:24.581621 4923 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6d8d462aa48228f279eee7fe822e66ff5b3eb4f05e3f71067412e6ca3b25a31a" Nov 28 11:38:24 crc kubenswrapper[4923]: I1128 11:38:24.581384 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-4dhrm" Nov 28 11:38:24 crc kubenswrapper[4923]: I1128 11:38:24.685146 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-snpf8"] Nov 28 11:38:24 crc kubenswrapper[4923]: E1128 11:38:24.685558 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f7e0da2f-9257-45b6-be10-0e7c9daa73ab" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 28 11:38:24 crc kubenswrapper[4923]: I1128 11:38:24.685574 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="f7e0da2f-9257-45b6-be10-0e7c9daa73ab" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 28 11:38:24 crc kubenswrapper[4923]: I1128 11:38:24.685738 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="f7e0da2f-9257-45b6-be10-0e7c9daa73ab" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 28 11:38:24 crc kubenswrapper[4923]: I1128 11:38:24.686320 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-snpf8" Nov 28 11:38:24 crc kubenswrapper[4923]: I1128 11:38:24.689371 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 11:38:24 crc kubenswrapper[4923]: I1128 11:38:24.689607 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 28 11:38:24 crc kubenswrapper[4923]: I1128 11:38:24.689729 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 28 11:38:24 crc kubenswrapper[4923]: I1128 11:38:24.691989 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-2xnkl" Nov 28 11:38:24 crc kubenswrapper[4923]: I1128 11:38:24.705185 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-snpf8"] Nov 28 11:38:24 crc kubenswrapper[4923]: I1128 11:38:24.874293 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5e780070-fef2-46e6-9c83-029164d61a1d-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-snpf8\" (UID: \"5e780070-fef2-46e6-9c83-029164d61a1d\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-snpf8" Nov 28 11:38:24 crc kubenswrapper[4923]: I1128 11:38:24.874406 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rrx85\" (UniqueName: \"kubernetes.io/projected/5e780070-fef2-46e6-9c83-029164d61a1d-kube-api-access-rrx85\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-snpf8\" (UID: \"5e780070-fef2-46e6-9c83-029164d61a1d\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-snpf8" Nov 28 11:38:24 crc kubenswrapper[4923]: I1128 11:38:24.874437 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5e780070-fef2-46e6-9c83-029164d61a1d-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-snpf8\" (UID: \"5e780070-fef2-46e6-9c83-029164d61a1d\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-snpf8" Nov 28 11:38:24 crc kubenswrapper[4923]: I1128 11:38:24.977026 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rrx85\" (UniqueName: \"kubernetes.io/projected/5e780070-fef2-46e6-9c83-029164d61a1d-kube-api-access-rrx85\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-snpf8\" (UID: \"5e780070-fef2-46e6-9c83-029164d61a1d\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-snpf8" Nov 28 11:38:24 crc kubenswrapper[4923]: I1128 11:38:24.977173 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5e780070-fef2-46e6-9c83-029164d61a1d-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-snpf8\" (UID: \"5e780070-fef2-46e6-9c83-029164d61a1d\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-snpf8" Nov 28 11:38:24 crc kubenswrapper[4923]: I1128 11:38:24.977384 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5e780070-fef2-46e6-9c83-029164d61a1d-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-snpf8\" (UID: 
\"5e780070-fef2-46e6-9c83-029164d61a1d\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-snpf8" Nov 28 11:38:24 crc kubenswrapper[4923]: I1128 11:38:24.990287 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5e780070-fef2-46e6-9c83-029164d61a1d-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-snpf8\" (UID: \"5e780070-fef2-46e6-9c83-029164d61a1d\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-snpf8" Nov 28 11:38:24 crc kubenswrapper[4923]: I1128 11:38:24.992860 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5e780070-fef2-46e6-9c83-029164d61a1d-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-snpf8\" (UID: \"5e780070-fef2-46e6-9c83-029164d61a1d\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-snpf8" Nov 28 11:38:25 crc kubenswrapper[4923]: I1128 11:38:25.006623 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rrx85\" (UniqueName: \"kubernetes.io/projected/5e780070-fef2-46e6-9c83-029164d61a1d-kube-api-access-rrx85\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-snpf8\" (UID: \"5e780070-fef2-46e6-9c83-029164d61a1d\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-snpf8" Nov 28 11:38:25 crc kubenswrapper[4923]: I1128 11:38:25.008484 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-snpf8" Nov 28 11:38:25 crc kubenswrapper[4923]: I1128 11:38:25.576376 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-snpf8"] Nov 28 11:38:25 crc kubenswrapper[4923]: I1128 11:38:25.601040 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-snpf8" event={"ID":"5e780070-fef2-46e6-9c83-029164d61a1d","Type":"ContainerStarted","Data":"a0662a647185882208cfda21929f356028d339af46671da36843a17ff682207a"} Nov 28 11:38:27 crc kubenswrapper[4923]: I1128 11:38:27.623590 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-snpf8" event={"ID":"5e780070-fef2-46e6-9c83-029164d61a1d","Type":"ContainerStarted","Data":"be394269cd936e93a8d35cabe1a3a0100090534d5d6f3971648ed1d76f04202f"} Nov 28 11:38:27 crc kubenswrapper[4923]: I1128 11:38:27.652790 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-snpf8" podStartSLOduration=2.420011005 podStartE2EDuration="3.652772273s" podCreationTimestamp="2025-11-28 11:38:24 +0000 UTC" firstStartedPulling="2025-11-28 11:38:25.587314584 +0000 UTC m=+1784.715998804" lastFinishedPulling="2025-11-28 11:38:26.820075852 +0000 UTC m=+1785.948760072" observedRunningTime="2025-11-28 11:38:27.647259366 +0000 UTC m=+1786.775943606" watchObservedRunningTime="2025-11-28 11:38:27.652772273 +0000 UTC m=+1786.781456493" Nov 28 11:38:29 crc kubenswrapper[4923]: I1128 11:38:29.168922 4923 scope.go:117] "RemoveContainer" containerID="59e9391c4a472ec90ba5872638acc6cc579bc7ad3d795096b3c915356fd4186a" Nov 28 11:38:29 crc kubenswrapper[4923]: E1128 11:38:29.169464 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-bwdth_openshift-machine-config-operator(092566f7-fc7d-4897-a1f2-4ecedcd3058e)\"" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" Nov 28 11:38:34 crc kubenswrapper[4923]: I1128 11:38:34.067210 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-b867g"] Nov 28 11:38:34 crc kubenswrapper[4923]: I1128 11:38:34.082680 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-b867g"] Nov 28 11:38:35 crc kubenswrapper[4923]: I1128 11:38:35.046403 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-s6twc"] Nov 28 11:38:35 crc kubenswrapper[4923]: I1128 11:38:35.056174 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-s6twc"] Nov 28 11:38:35 crc kubenswrapper[4923]: I1128 11:38:35.178586 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1813822f-07d2-4a68-98bf-26cf5edd6707" path="/var/lib/kubelet/pods/1813822f-07d2-4a68-98bf-26cf5edd6707/volumes" Nov 28 11:38:35 crc kubenswrapper[4923]: I1128 11:38:35.179218 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4c7bc447-b1f7-4e68-b0da-310515aecea9" path="/var/lib/kubelet/pods/4c7bc447-b1f7-4e68-b0da-310515aecea9/volumes" Nov 28 11:38:41 crc kubenswrapper[4923]: I1128 11:38:41.176832 4923 scope.go:117] "RemoveContainer" containerID="59e9391c4a472ec90ba5872638acc6cc579bc7ad3d795096b3c915356fd4186a" Nov 28 11:38:41 crc kubenswrapper[4923]: E1128 11:38:41.177456 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bwdth_openshift-machine-config-operator(092566f7-fc7d-4897-a1f2-4ecedcd3058e)\"" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" Nov 28 11:38:52 crc kubenswrapper[4923]: I1128 11:38:52.168412 4923 scope.go:117] "RemoveContainer" containerID="59e9391c4a472ec90ba5872638acc6cc579bc7ad3d795096b3c915356fd4186a" Nov 28 11:38:52 crc kubenswrapper[4923]: E1128 11:38:52.169245 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bwdth_openshift-machine-config-operator(092566f7-fc7d-4897-a1f2-4ecedcd3058e)\"" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" Nov 28 11:39:03 crc kubenswrapper[4923]: I1128 11:39:03.170643 4923 scope.go:117] "RemoveContainer" containerID="59e9391c4a472ec90ba5872638acc6cc579bc7ad3d795096b3c915356fd4186a" Nov 28 11:39:03 crc kubenswrapper[4923]: E1128 11:39:03.171903 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bwdth_openshift-machine-config-operator(092566f7-fc7d-4897-a1f2-4ecedcd3058e)\"" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" Nov 28 11:39:07 crc kubenswrapper[4923]: I1128 11:39:07.355220 4923 scope.go:117] "RemoveContainer" 
containerID="bec9a5a63c2d24eff6a5181fa8d4b2bd83d6de38ee6453578c0aedfb3b3adb2a" Nov 28 11:39:07 crc kubenswrapper[4923]: I1128 11:39:07.411241 4923 scope.go:117] "RemoveContainer" containerID="12f53f24d35abec23848f3fd1f0a37dcfe9e4e2513abc3bf52ccfbe8e27edf78" Nov 28 11:39:07 crc kubenswrapper[4923]: I1128 11:39:07.454044 4923 scope.go:117] "RemoveContainer" containerID="4e1dcbe13bba7a95afee9778c80901bf4dff2a4cf4c8bec19d0efaa3eb2d8d02" Nov 28 11:39:07 crc kubenswrapper[4923]: I1128 11:39:07.516210 4923 scope.go:117] "RemoveContainer" containerID="74b379aca93af9f2387202b8bda7b746d56fc3b1ac2a351fc25aa0037baf6098" Nov 28 11:39:16 crc kubenswrapper[4923]: I1128 11:39:16.170375 4923 scope.go:117] "RemoveContainer" containerID="59e9391c4a472ec90ba5872638acc6cc579bc7ad3d795096b3c915356fd4186a" Nov 28 11:39:16 crc kubenswrapper[4923]: E1128 11:39:16.171511 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bwdth_openshift-machine-config-operator(092566f7-fc7d-4897-a1f2-4ecedcd3058e)\"" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" Nov 28 11:39:17 crc kubenswrapper[4923]: I1128 11:39:17.066614 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-db-create-v9nls"] Nov 28 11:39:17 crc kubenswrapper[4923]: I1128 11:39:17.077118 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-s4rht"] Nov 28 11:39:17 crc kubenswrapper[4923]: I1128 11:39:17.086790 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-nfvs8"] Nov 28 11:39:17 crc kubenswrapper[4923]: I1128 11:39:17.093194 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-s4rht"] Nov 28 11:39:17 crc kubenswrapper[4923]: I1128 11:39:17.098875 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-nfvs8"] Nov 28 11:39:17 crc kubenswrapper[4923]: I1128 11:39:17.104752 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-v9nls"] Nov 28 11:39:17 crc kubenswrapper[4923]: I1128 11:39:17.129571 4923 generic.go:334] "Generic (PLEG): container finished" podID="5e780070-fef2-46e6-9c83-029164d61a1d" containerID="be394269cd936e93a8d35cabe1a3a0100090534d5d6f3971648ed1d76f04202f" exitCode=0 Nov 28 11:39:17 crc kubenswrapper[4923]: I1128 11:39:17.129673 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-snpf8" event={"ID":"5e780070-fef2-46e6-9c83-029164d61a1d","Type":"ContainerDied","Data":"be394269cd936e93a8d35cabe1a3a0100090534d5d6f3971648ed1d76f04202f"} Nov 28 11:39:17 crc kubenswrapper[4923]: I1128 11:39:17.179085 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0884198d-4d47-4e53-8bb6-ea2e8365cadd" path="/var/lib/kubelet/pods/0884198d-4d47-4e53-8bb6-ea2e8365cadd/volumes" Nov 28 11:39:17 crc kubenswrapper[4923]: I1128 11:39:17.180242 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="28e2a9c4-1fc5-42d5-9afe-63d569d58db4" path="/var/lib/kubelet/pods/28e2a9c4-1fc5-42d5-9afe-63d569d58db4/volumes" Nov 28 11:39:17 crc kubenswrapper[4923]: I1128 11:39:17.181034 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d9616fc3-155f-4546-a191-2bd6337a71a7" 
path="/var/lib/kubelet/pods/d9616fc3-155f-4546-a191-2bd6337a71a7/volumes" Nov 28 11:39:18 crc kubenswrapper[4923]: I1128 11:39:18.066841 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-eaa6-account-create-update-q5lzr"] Nov 28 11:39:18 crc kubenswrapper[4923]: I1128 11:39:18.079119 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-c848-account-create-update-cwvm5"] Nov 28 11:39:18 crc kubenswrapper[4923]: I1128 11:39:18.095964 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-e361-account-create-update-g2428"] Nov 28 11:39:18 crc kubenswrapper[4923]: I1128 11:39:18.103889 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-eaa6-account-create-update-q5lzr"] Nov 28 11:39:18 crc kubenswrapper[4923]: I1128 11:39:18.112672 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-e361-account-create-update-g2428"] Nov 28 11:39:18 crc kubenswrapper[4923]: I1128 11:39:18.120354 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-c848-account-create-update-cwvm5"] Nov 28 11:39:18 crc kubenswrapper[4923]: I1128 11:39:18.520946 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-snpf8" Nov 28 11:39:18 crc kubenswrapper[4923]: I1128 11:39:18.661573 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5e780070-fef2-46e6-9c83-029164d61a1d-inventory\") pod \"5e780070-fef2-46e6-9c83-029164d61a1d\" (UID: \"5e780070-fef2-46e6-9c83-029164d61a1d\") " Nov 28 11:39:18 crc kubenswrapper[4923]: I1128 11:39:18.661762 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rrx85\" (UniqueName: \"kubernetes.io/projected/5e780070-fef2-46e6-9c83-029164d61a1d-kube-api-access-rrx85\") pod \"5e780070-fef2-46e6-9c83-029164d61a1d\" (UID: \"5e780070-fef2-46e6-9c83-029164d61a1d\") " Nov 28 11:39:18 crc kubenswrapper[4923]: I1128 11:39:18.661864 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5e780070-fef2-46e6-9c83-029164d61a1d-ssh-key\") pod \"5e780070-fef2-46e6-9c83-029164d61a1d\" (UID: \"5e780070-fef2-46e6-9c83-029164d61a1d\") " Nov 28 11:39:18 crc kubenswrapper[4923]: I1128 11:39:18.668293 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5e780070-fef2-46e6-9c83-029164d61a1d-kube-api-access-rrx85" (OuterVolumeSpecName: "kube-api-access-rrx85") pod "5e780070-fef2-46e6-9c83-029164d61a1d" (UID: "5e780070-fef2-46e6-9c83-029164d61a1d"). InnerVolumeSpecName "kube-api-access-rrx85". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:39:18 crc kubenswrapper[4923]: I1128 11:39:18.695698 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5e780070-fef2-46e6-9c83-029164d61a1d-inventory" (OuterVolumeSpecName: "inventory") pod "5e780070-fef2-46e6-9c83-029164d61a1d" (UID: "5e780070-fef2-46e6-9c83-029164d61a1d"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:39:18 crc kubenswrapper[4923]: I1128 11:39:18.711754 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5e780070-fef2-46e6-9c83-029164d61a1d-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "5e780070-fef2-46e6-9c83-029164d61a1d" (UID: "5e780070-fef2-46e6-9c83-029164d61a1d"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:39:18 crc kubenswrapper[4923]: I1128 11:39:18.764340 4923 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5e780070-fef2-46e6-9c83-029164d61a1d-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 11:39:18 crc kubenswrapper[4923]: I1128 11:39:18.764752 4923 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5e780070-fef2-46e6-9c83-029164d61a1d-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 11:39:18 crc kubenswrapper[4923]: I1128 11:39:18.765232 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rrx85\" (UniqueName: \"kubernetes.io/projected/5e780070-fef2-46e6-9c83-029164d61a1d-kube-api-access-rrx85\") on node \"crc\" DevicePath \"\"" Nov 28 11:39:19 crc kubenswrapper[4923]: I1128 11:39:19.160073 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-snpf8" event={"ID":"5e780070-fef2-46e6-9c83-029164d61a1d","Type":"ContainerDied","Data":"a0662a647185882208cfda21929f356028d339af46671da36843a17ff682207a"} Nov 28 11:39:19 crc kubenswrapper[4923]: I1128 11:39:19.160130 4923 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a0662a647185882208cfda21929f356028d339af46671da36843a17ff682207a" Nov 28 11:39:19 crc kubenswrapper[4923]: I1128 11:39:19.160187 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-snpf8" Nov 28 11:39:19 crc kubenswrapper[4923]: I1128 11:39:19.192923 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8251c07b-8159-40bd-8f32-51d8b0c4568a" path="/var/lib/kubelet/pods/8251c07b-8159-40bd-8f32-51d8b0c4568a/volumes" Nov 28 11:39:19 crc kubenswrapper[4923]: I1128 11:39:19.195979 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a6e7f7d2-60c3-4bf4-925e-06b4f92b333d" path="/var/lib/kubelet/pods/a6e7f7d2-60c3-4bf4-925e-06b4f92b333d/volumes" Nov 28 11:39:19 crc kubenswrapper[4923]: I1128 11:39:19.197273 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e3d9b34a-edae-4f44-b4e1-78b3ece44177" path="/var/lib/kubelet/pods/e3d9b34a-edae-4f44-b4e1-78b3ece44177/volumes" Nov 28 11:39:19 crc kubenswrapper[4923]: I1128 11:39:19.277858 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xnbvj"] Nov 28 11:39:19 crc kubenswrapper[4923]: E1128 11:39:19.278331 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5e780070-fef2-46e6-9c83-029164d61a1d" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 28 11:39:19 crc kubenswrapper[4923]: I1128 11:39:19.278353 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="5e780070-fef2-46e6-9c83-029164d61a1d" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 28 11:39:19 crc kubenswrapper[4923]: I1128 11:39:19.278629 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="5e780070-fef2-46e6-9c83-029164d61a1d" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 28 11:39:19 crc kubenswrapper[4923]: I1128 11:39:19.279273 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xnbvj" Nov 28 11:39:19 crc kubenswrapper[4923]: I1128 11:39:19.282629 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 28 11:39:19 crc kubenswrapper[4923]: I1128 11:39:19.282805 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 28 11:39:19 crc kubenswrapper[4923]: I1128 11:39:19.284075 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 11:39:19 crc kubenswrapper[4923]: I1128 11:39:19.284237 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-2xnkl" Nov 28 11:39:19 crc kubenswrapper[4923]: I1128 11:39:19.309334 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xnbvj"] Nov 28 11:39:19 crc kubenswrapper[4923]: I1128 11:39:19.380340 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wj5tf\" (UniqueName: \"kubernetes.io/projected/dc2f570f-2c91-4271-9d89-e10d5e0fe601-kube-api-access-wj5tf\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xnbvj\" (UID: \"dc2f570f-2c91-4271-9d89-e10d5e0fe601\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xnbvj" Nov 28 11:39:19 crc kubenswrapper[4923]: I1128 11:39:19.380430 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/dc2f570f-2c91-4271-9d89-e10d5e0fe601-inventory\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xnbvj\" (UID: \"dc2f570f-2c91-4271-9d89-e10d5e0fe601\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xnbvj" Nov 28 11:39:19 crc kubenswrapper[4923]: I1128 11:39:19.380539 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/dc2f570f-2c91-4271-9d89-e10d5e0fe601-ssh-key\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xnbvj\" (UID: \"dc2f570f-2c91-4271-9d89-e10d5e0fe601\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xnbvj" Nov 28 11:39:19 crc kubenswrapper[4923]: I1128 11:39:19.483490 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wj5tf\" (UniqueName: \"kubernetes.io/projected/dc2f570f-2c91-4271-9d89-e10d5e0fe601-kube-api-access-wj5tf\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xnbvj\" (UID: \"dc2f570f-2c91-4271-9d89-e10d5e0fe601\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xnbvj" Nov 28 11:39:19 crc kubenswrapper[4923]: I1128 11:39:19.483644 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/dc2f570f-2c91-4271-9d89-e10d5e0fe601-inventory\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xnbvj\" (UID: \"dc2f570f-2c91-4271-9d89-e10d5e0fe601\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xnbvj" Nov 28 11:39:19 crc kubenswrapper[4923]: I1128 11:39:19.483793 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/dc2f570f-2c91-4271-9d89-e10d5e0fe601-ssh-key\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xnbvj\" 
(UID: \"dc2f570f-2c91-4271-9d89-e10d5e0fe601\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xnbvj" Nov 28 11:39:19 crc kubenswrapper[4923]: I1128 11:39:19.494079 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/dc2f570f-2c91-4271-9d89-e10d5e0fe601-ssh-key\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xnbvj\" (UID: \"dc2f570f-2c91-4271-9d89-e10d5e0fe601\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xnbvj" Nov 28 11:39:19 crc kubenswrapper[4923]: I1128 11:39:19.497431 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/dc2f570f-2c91-4271-9d89-e10d5e0fe601-inventory\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xnbvj\" (UID: \"dc2f570f-2c91-4271-9d89-e10d5e0fe601\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xnbvj" Nov 28 11:39:19 crc kubenswrapper[4923]: I1128 11:39:19.515996 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wj5tf\" (UniqueName: \"kubernetes.io/projected/dc2f570f-2c91-4271-9d89-e10d5e0fe601-kube-api-access-wj5tf\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xnbvj\" (UID: \"dc2f570f-2c91-4271-9d89-e10d5e0fe601\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xnbvj" Nov 28 11:39:19 crc kubenswrapper[4923]: I1128 11:39:19.599115 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xnbvj" Nov 28 11:39:20 crc kubenswrapper[4923]: I1128 11:39:20.186703 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xnbvj"] Nov 28 11:39:20 crc kubenswrapper[4923]: W1128 11:39:20.192854 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddc2f570f_2c91_4271_9d89_e10d5e0fe601.slice/crio-eba78cba4d36cbebadc63b36468208f62fad8037d56ecf1f7aaf96a6aa1d2c1c WatchSource:0}: Error finding container eba78cba4d36cbebadc63b36468208f62fad8037d56ecf1f7aaf96a6aa1d2c1c: Status 404 returned error can't find the container with id eba78cba4d36cbebadc63b36468208f62fad8037d56ecf1f7aaf96a6aa1d2c1c Nov 28 11:39:21 crc kubenswrapper[4923]: I1128 11:39:21.200436 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xnbvj" event={"ID":"dc2f570f-2c91-4271-9d89-e10d5e0fe601","Type":"ContainerStarted","Data":"eba78cba4d36cbebadc63b36468208f62fad8037d56ecf1f7aaf96a6aa1d2c1c"} Nov 28 11:39:22 crc kubenswrapper[4923]: I1128 11:39:22.213177 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xnbvj" event={"ID":"dc2f570f-2c91-4271-9d89-e10d5e0fe601","Type":"ContainerStarted","Data":"517fccd453695b264dd486ef3f13ce301fff9f8002589808fedfc00b898271e9"} Nov 28 11:39:22 crc kubenswrapper[4923]: I1128 11:39:22.246751 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xnbvj" podStartSLOduration=1.956721785 podStartE2EDuration="3.246729359s" podCreationTimestamp="2025-11-28 11:39:19 +0000 UTC" firstStartedPulling="2025-11-28 11:39:20.198667926 +0000 UTC m=+1839.327352146" lastFinishedPulling="2025-11-28 11:39:21.4886755 +0000 UTC m=+1840.617359720" observedRunningTime="2025-11-28 
11:39:22.232084153 +0000 UTC m=+1841.360768373" watchObservedRunningTime="2025-11-28 11:39:22.246729359 +0000 UTC m=+1841.375413579" Nov 28 11:39:27 crc kubenswrapper[4923]: I1128 11:39:27.268193 4923 generic.go:334] "Generic (PLEG): container finished" podID="dc2f570f-2c91-4271-9d89-e10d5e0fe601" containerID="517fccd453695b264dd486ef3f13ce301fff9f8002589808fedfc00b898271e9" exitCode=0 Nov 28 11:39:27 crc kubenswrapper[4923]: I1128 11:39:27.268301 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xnbvj" event={"ID":"dc2f570f-2c91-4271-9d89-e10d5e0fe601","Type":"ContainerDied","Data":"517fccd453695b264dd486ef3f13ce301fff9f8002589808fedfc00b898271e9"} Nov 28 11:39:28 crc kubenswrapper[4923]: I1128 11:39:28.748588 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xnbvj" Nov 28 11:39:28 crc kubenswrapper[4923]: I1128 11:39:28.917780 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wj5tf\" (UniqueName: \"kubernetes.io/projected/dc2f570f-2c91-4271-9d89-e10d5e0fe601-kube-api-access-wj5tf\") pod \"dc2f570f-2c91-4271-9d89-e10d5e0fe601\" (UID: \"dc2f570f-2c91-4271-9d89-e10d5e0fe601\") " Nov 28 11:39:28 crc kubenswrapper[4923]: I1128 11:39:28.918408 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/dc2f570f-2c91-4271-9d89-e10d5e0fe601-inventory\") pod \"dc2f570f-2c91-4271-9d89-e10d5e0fe601\" (UID: \"dc2f570f-2c91-4271-9d89-e10d5e0fe601\") " Nov 28 11:39:28 crc kubenswrapper[4923]: I1128 11:39:28.918540 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/dc2f570f-2c91-4271-9d89-e10d5e0fe601-ssh-key\") pod \"dc2f570f-2c91-4271-9d89-e10d5e0fe601\" (UID: \"dc2f570f-2c91-4271-9d89-e10d5e0fe601\") " Nov 28 11:39:28 crc kubenswrapper[4923]: I1128 11:39:28.935346 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dc2f570f-2c91-4271-9d89-e10d5e0fe601-kube-api-access-wj5tf" (OuterVolumeSpecName: "kube-api-access-wj5tf") pod "dc2f570f-2c91-4271-9d89-e10d5e0fe601" (UID: "dc2f570f-2c91-4271-9d89-e10d5e0fe601"). InnerVolumeSpecName "kube-api-access-wj5tf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:39:28 crc kubenswrapper[4923]: I1128 11:39:28.952047 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dc2f570f-2c91-4271-9d89-e10d5e0fe601-inventory" (OuterVolumeSpecName: "inventory") pod "dc2f570f-2c91-4271-9d89-e10d5e0fe601" (UID: "dc2f570f-2c91-4271-9d89-e10d5e0fe601"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:39:28 crc kubenswrapper[4923]: I1128 11:39:28.954605 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dc2f570f-2c91-4271-9d89-e10d5e0fe601-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "dc2f570f-2c91-4271-9d89-e10d5e0fe601" (UID: "dc2f570f-2c91-4271-9d89-e10d5e0fe601"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:39:29 crc kubenswrapper[4923]: I1128 11:39:29.021383 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wj5tf\" (UniqueName: \"kubernetes.io/projected/dc2f570f-2c91-4271-9d89-e10d5e0fe601-kube-api-access-wj5tf\") on node \"crc\" DevicePath \"\"" Nov 28 11:39:29 crc kubenswrapper[4923]: I1128 11:39:29.021414 4923 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/dc2f570f-2c91-4271-9d89-e10d5e0fe601-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 11:39:29 crc kubenswrapper[4923]: I1128 11:39:29.021423 4923 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/dc2f570f-2c91-4271-9d89-e10d5e0fe601-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 11:39:29 crc kubenswrapper[4923]: I1128 11:39:29.293632 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xnbvj" event={"ID":"dc2f570f-2c91-4271-9d89-e10d5e0fe601","Type":"ContainerDied","Data":"eba78cba4d36cbebadc63b36468208f62fad8037d56ecf1f7aaf96a6aa1d2c1c"} Nov 28 11:39:29 crc kubenswrapper[4923]: I1128 11:39:29.293692 4923 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="eba78cba4d36cbebadc63b36468208f62fad8037d56ecf1f7aaf96a6aa1d2c1c" Nov 28 11:39:29 crc kubenswrapper[4923]: I1128 11:39:29.293757 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xnbvj" Nov 28 11:39:29 crc kubenswrapper[4923]: I1128 11:39:29.371974 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-nf2p8"] Nov 28 11:39:29 crc kubenswrapper[4923]: E1128 11:39:29.372570 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dc2f570f-2c91-4271-9d89-e10d5e0fe601" containerName="ceph-hci-pre-edpm-deployment-openstack-edpm-ipam" Nov 28 11:39:29 crc kubenswrapper[4923]: I1128 11:39:29.372645 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="dc2f570f-2c91-4271-9d89-e10d5e0fe601" containerName="ceph-hci-pre-edpm-deployment-openstack-edpm-ipam" Nov 28 11:39:29 crc kubenswrapper[4923]: I1128 11:39:29.372881 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="dc2f570f-2c91-4271-9d89-e10d5e0fe601" containerName="ceph-hci-pre-edpm-deployment-openstack-edpm-ipam" Nov 28 11:39:29 crc kubenswrapper[4923]: I1128 11:39:29.373476 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-nf2p8" Nov 28 11:39:29 crc kubenswrapper[4923]: I1128 11:39:29.376742 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 11:39:29 crc kubenswrapper[4923]: I1128 11:39:29.377015 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 28 11:39:29 crc kubenswrapper[4923]: I1128 11:39:29.377284 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-2xnkl" Nov 28 11:39:29 crc kubenswrapper[4923]: I1128 11:39:29.377585 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 28 11:39:29 crc kubenswrapper[4923]: I1128 11:39:29.394493 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-nf2p8"] Nov 28 11:39:29 crc kubenswrapper[4923]: I1128 11:39:29.532749 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7e8e0a19-95c2-4a0a-8847-e7e2fdf016d3-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-nf2p8\" (UID: \"7e8e0a19-95c2-4a0a-8847-e7e2fdf016d3\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-nf2p8" Nov 28 11:39:29 crc kubenswrapper[4923]: I1128 11:39:29.532860 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7e8e0a19-95c2-4a0a-8847-e7e2fdf016d3-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-nf2p8\" (UID: \"7e8e0a19-95c2-4a0a-8847-e7e2fdf016d3\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-nf2p8" Nov 28 11:39:29 crc kubenswrapper[4923]: I1128 11:39:29.533050 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f9ntf\" (UniqueName: \"kubernetes.io/projected/7e8e0a19-95c2-4a0a-8847-e7e2fdf016d3-kube-api-access-f9ntf\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-nf2p8\" (UID: \"7e8e0a19-95c2-4a0a-8847-e7e2fdf016d3\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-nf2p8" Nov 28 11:39:29 crc kubenswrapper[4923]: I1128 11:39:29.635007 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7e8e0a19-95c2-4a0a-8847-e7e2fdf016d3-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-nf2p8\" (UID: \"7e8e0a19-95c2-4a0a-8847-e7e2fdf016d3\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-nf2p8" Nov 28 11:39:29 crc kubenswrapper[4923]: I1128 11:39:29.635078 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7e8e0a19-95c2-4a0a-8847-e7e2fdf016d3-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-nf2p8\" (UID: \"7e8e0a19-95c2-4a0a-8847-e7e2fdf016d3\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-nf2p8" Nov 28 11:39:29 crc kubenswrapper[4923]: I1128 11:39:29.635179 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f9ntf\" (UniqueName: \"kubernetes.io/projected/7e8e0a19-95c2-4a0a-8847-e7e2fdf016d3-kube-api-access-f9ntf\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-nf2p8\" 
(UID: \"7e8e0a19-95c2-4a0a-8847-e7e2fdf016d3\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-nf2p8" Nov 28 11:39:29 crc kubenswrapper[4923]: I1128 11:39:29.642043 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7e8e0a19-95c2-4a0a-8847-e7e2fdf016d3-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-nf2p8\" (UID: \"7e8e0a19-95c2-4a0a-8847-e7e2fdf016d3\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-nf2p8" Nov 28 11:39:29 crc kubenswrapper[4923]: I1128 11:39:29.642199 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7e8e0a19-95c2-4a0a-8847-e7e2fdf016d3-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-nf2p8\" (UID: \"7e8e0a19-95c2-4a0a-8847-e7e2fdf016d3\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-nf2p8" Nov 28 11:39:29 crc kubenswrapper[4923]: I1128 11:39:29.657114 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f9ntf\" (UniqueName: \"kubernetes.io/projected/7e8e0a19-95c2-4a0a-8847-e7e2fdf016d3-kube-api-access-f9ntf\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-nf2p8\" (UID: \"7e8e0a19-95c2-4a0a-8847-e7e2fdf016d3\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-nf2p8" Nov 28 11:39:29 crc kubenswrapper[4923]: I1128 11:39:29.698590 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-nf2p8" Nov 28 11:39:30 crc kubenswrapper[4923]: I1128 11:39:30.170197 4923 scope.go:117] "RemoveContainer" containerID="59e9391c4a472ec90ba5872638acc6cc579bc7ad3d795096b3c915356fd4186a" Nov 28 11:39:30 crc kubenswrapper[4923]: E1128 11:39:30.170705 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bwdth_openshift-machine-config-operator(092566f7-fc7d-4897-a1f2-4ecedcd3058e)\"" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" Nov 28 11:39:30 crc kubenswrapper[4923]: I1128 11:39:30.873824 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-nf2p8"] Nov 28 11:39:31 crc kubenswrapper[4923]: I1128 11:39:31.321974 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-nf2p8" event={"ID":"7e8e0a19-95c2-4a0a-8847-e7e2fdf016d3","Type":"ContainerStarted","Data":"02f98c3066e3d975cdacf8dd193cce90e38605a3f97384a75716d802e3fcd8a7"} Nov 28 11:39:32 crc kubenswrapper[4923]: I1128 11:39:32.334020 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-nf2p8" event={"ID":"7e8e0a19-95c2-4a0a-8847-e7e2fdf016d3","Type":"ContainerStarted","Data":"0dea174be5cef227b1caa0c6ebb18b90d4187e4138ef7eb68f6296dbdeac81f4"} Nov 28 11:39:32 crc kubenswrapper[4923]: I1128 11:39:32.362756 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-nf2p8" podStartSLOduration=2.8828803069999998 podStartE2EDuration="3.362735205s" podCreationTimestamp="2025-11-28 11:39:29 +0000 UTC" firstStartedPulling="2025-11-28 11:39:30.883845976 +0000 UTC 
m=+1850.012530226" lastFinishedPulling="2025-11-28 11:39:31.363700884 +0000 UTC m=+1850.492385124" observedRunningTime="2025-11-28 11:39:32.356253481 +0000 UTC m=+1851.484937761" watchObservedRunningTime="2025-11-28 11:39:32.362735205 +0000 UTC m=+1851.491419425" Nov 28 11:39:41 crc kubenswrapper[4923]: I1128 11:39:41.176791 4923 scope.go:117] "RemoveContainer" containerID="59e9391c4a472ec90ba5872638acc6cc579bc7ad3d795096b3c915356fd4186a" Nov 28 11:39:41 crc kubenswrapper[4923]: E1128 11:39:41.178253 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bwdth_openshift-machine-config-operator(092566f7-fc7d-4897-a1f2-4ecedcd3058e)\"" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" Nov 28 11:39:46 crc kubenswrapper[4923]: I1128 11:39:46.058658 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-6vmzl"] Nov 28 11:39:46 crc kubenswrapper[4923]: I1128 11:39:46.075752 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-6vmzl"] Nov 28 11:39:47 crc kubenswrapper[4923]: I1128 11:39:47.182020 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a1a099ed-ae12-465a-b12d-f0bb966dfc64" path="/var/lib/kubelet/pods/a1a099ed-ae12-465a-b12d-f0bb966dfc64/volumes" Nov 28 11:39:53 crc kubenswrapper[4923]: I1128 11:39:53.169121 4923 scope.go:117] "RemoveContainer" containerID="59e9391c4a472ec90ba5872638acc6cc579bc7ad3d795096b3c915356fd4186a" Nov 28 11:39:53 crc kubenswrapper[4923]: I1128 11:39:53.584662 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" event={"ID":"092566f7-fc7d-4897-a1f2-4ecedcd3058e","Type":"ContainerStarted","Data":"201e214c150f7a94a55f7f14ac88bad5c3f58ddde6dc1868cf78309362438d26"} Nov 28 11:40:05 crc kubenswrapper[4923]: I1128 11:40:05.044906 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-8xtzd"] Nov 28 11:40:05 crc kubenswrapper[4923]: I1128 11:40:05.058799 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-cell-mapping-8xtzd"] Nov 28 11:40:05 crc kubenswrapper[4923]: I1128 11:40:05.187897 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d26d3820-97ce-42bc-92c8-c48082942764" path="/var/lib/kubelet/pods/d26d3820-97ce-42bc-92c8-c48082942764/volumes" Nov 28 11:40:07 crc kubenswrapper[4923]: I1128 11:40:07.057511 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-rmq5g"] Nov 28 11:40:07 crc kubenswrapper[4923]: I1128 11:40:07.072210 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-rmq5g"] Nov 28 11:40:07 crc kubenswrapper[4923]: I1128 11:40:07.178341 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ad75bb0-7f36-4ac3-b0a7-402237601802" path="/var/lib/kubelet/pods/3ad75bb0-7f36-4ac3-b0a7-402237601802/volumes" Nov 28 11:40:07 crc kubenswrapper[4923]: I1128 11:40:07.629415 4923 scope.go:117] "RemoveContainer" containerID="7e9207943b07a406fe8f65daf852a00fa82fa2e38b70dafe5af1e90dda7afd40" Nov 28 11:40:07 crc kubenswrapper[4923]: I1128 11:40:07.687723 4923 scope.go:117] "RemoveContainer" 
containerID="a6a46703e68f3f61cb5f0f71bbc5d80e8dc42858564ac4249f5b7327c8b93fa5" Nov 28 11:40:07 crc kubenswrapper[4923]: I1128 11:40:07.706853 4923 scope.go:117] "RemoveContainer" containerID="f32f3db62c773c399eff8e87a70356fd322b810129d7b02ef8f4f1ae002cab61" Nov 28 11:40:07 crc kubenswrapper[4923]: I1128 11:40:07.747361 4923 scope.go:117] "RemoveContainer" containerID="f2f4b931fe6b25b5a0b10bfe1ecad11069e795755d331f21d65b3ab8e9b3d841" Nov 28 11:40:07 crc kubenswrapper[4923]: I1128 11:40:07.782367 4923 scope.go:117] "RemoveContainer" containerID="a3b5459607c6561ea92074c815ab5308bd2847c59c84bff0964a44dcd9d77c33" Nov 28 11:40:07 crc kubenswrapper[4923]: I1128 11:40:07.812611 4923 scope.go:117] "RemoveContainer" containerID="2c2cbe76ca8f2486db544ffd5af79c5c5163fc41ce33b20697626fe528913cb6" Nov 28 11:40:07 crc kubenswrapper[4923]: I1128 11:40:07.854445 4923 scope.go:117] "RemoveContainer" containerID="c0ef187ebe0dfae485de6607fa0a6262f6a611373d82f1b47fa891faebac413a" Nov 28 11:40:07 crc kubenswrapper[4923]: I1128 11:40:07.914187 4923 scope.go:117] "RemoveContainer" containerID="b6d0d500917d58745e9dd95496c94c108616618eda406f6118dedf1b5123f2d0" Nov 28 11:40:07 crc kubenswrapper[4923]: I1128 11:40:07.941510 4923 scope.go:117] "RemoveContainer" containerID="b371cb093ed219d127de4b09c49ab301c61daae66c5c0159d8c5f84d9db85a7a" Nov 28 11:40:33 crc kubenswrapper[4923]: I1128 11:40:33.943337 4923 generic.go:334] "Generic (PLEG): container finished" podID="7e8e0a19-95c2-4a0a-8847-e7e2fdf016d3" containerID="0dea174be5cef227b1caa0c6ebb18b90d4187e4138ef7eb68f6296dbdeac81f4" exitCode=0 Nov 28 11:40:33 crc kubenswrapper[4923]: I1128 11:40:33.943419 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-nf2p8" event={"ID":"7e8e0a19-95c2-4a0a-8847-e7e2fdf016d3","Type":"ContainerDied","Data":"0dea174be5cef227b1caa0c6ebb18b90d4187e4138ef7eb68f6296dbdeac81f4"} Nov 28 11:40:35 crc kubenswrapper[4923]: I1128 11:40:35.376044 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-nf2p8" Nov 28 11:40:35 crc kubenswrapper[4923]: I1128 11:40:35.423752 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7e8e0a19-95c2-4a0a-8847-e7e2fdf016d3-inventory\") pod \"7e8e0a19-95c2-4a0a-8847-e7e2fdf016d3\" (UID: \"7e8e0a19-95c2-4a0a-8847-e7e2fdf016d3\") " Nov 28 11:40:35 crc kubenswrapper[4923]: I1128 11:40:35.424103 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f9ntf\" (UniqueName: \"kubernetes.io/projected/7e8e0a19-95c2-4a0a-8847-e7e2fdf016d3-kube-api-access-f9ntf\") pod \"7e8e0a19-95c2-4a0a-8847-e7e2fdf016d3\" (UID: \"7e8e0a19-95c2-4a0a-8847-e7e2fdf016d3\") " Nov 28 11:40:35 crc kubenswrapper[4923]: I1128 11:40:35.424499 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7e8e0a19-95c2-4a0a-8847-e7e2fdf016d3-ssh-key\") pod \"7e8e0a19-95c2-4a0a-8847-e7e2fdf016d3\" (UID: \"7e8e0a19-95c2-4a0a-8847-e7e2fdf016d3\") " Nov 28 11:40:35 crc kubenswrapper[4923]: I1128 11:40:35.449032 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7e8e0a19-95c2-4a0a-8847-e7e2fdf016d3-kube-api-access-f9ntf" (OuterVolumeSpecName: "kube-api-access-f9ntf") pod "7e8e0a19-95c2-4a0a-8847-e7e2fdf016d3" (UID: "7e8e0a19-95c2-4a0a-8847-e7e2fdf016d3"). 
InnerVolumeSpecName "kube-api-access-f9ntf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:40:35 crc kubenswrapper[4923]: I1128 11:40:35.455685 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7e8e0a19-95c2-4a0a-8847-e7e2fdf016d3-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "7e8e0a19-95c2-4a0a-8847-e7e2fdf016d3" (UID: "7e8e0a19-95c2-4a0a-8847-e7e2fdf016d3"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:40:35 crc kubenswrapper[4923]: I1128 11:40:35.477000 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7e8e0a19-95c2-4a0a-8847-e7e2fdf016d3-inventory" (OuterVolumeSpecName: "inventory") pod "7e8e0a19-95c2-4a0a-8847-e7e2fdf016d3" (UID: "7e8e0a19-95c2-4a0a-8847-e7e2fdf016d3"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:40:35 crc kubenswrapper[4923]: I1128 11:40:35.527670 4923 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7e8e0a19-95c2-4a0a-8847-e7e2fdf016d3-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 11:40:35 crc kubenswrapper[4923]: I1128 11:40:35.527707 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f9ntf\" (UniqueName: \"kubernetes.io/projected/7e8e0a19-95c2-4a0a-8847-e7e2fdf016d3-kube-api-access-f9ntf\") on node \"crc\" DevicePath \"\"" Nov 28 11:40:35 crc kubenswrapper[4923]: I1128 11:40:35.527720 4923 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7e8e0a19-95c2-4a0a-8847-e7e2fdf016d3-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 11:40:35 crc kubenswrapper[4923]: I1128 11:40:35.961168 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-nf2p8" event={"ID":"7e8e0a19-95c2-4a0a-8847-e7e2fdf016d3","Type":"ContainerDied","Data":"02f98c3066e3d975cdacf8dd193cce90e38605a3f97384a75716d802e3fcd8a7"} Nov 28 11:40:35 crc kubenswrapper[4923]: I1128 11:40:35.961224 4923 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="02f98c3066e3d975cdacf8dd193cce90e38605a3f97384a75716d802e3fcd8a7" Nov 28 11:40:35 crc kubenswrapper[4923]: I1128 11:40:35.961499 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-nf2p8" Nov 28 11:40:36 crc kubenswrapper[4923]: I1128 11:40:36.063874 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-6tcd6"] Nov 28 11:40:36 crc kubenswrapper[4923]: E1128 11:40:36.064236 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7e8e0a19-95c2-4a0a-8847-e7e2fdf016d3" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Nov 28 11:40:36 crc kubenswrapper[4923]: I1128 11:40:36.064254 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="7e8e0a19-95c2-4a0a-8847-e7e2fdf016d3" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Nov 28 11:40:36 crc kubenswrapper[4923]: I1128 11:40:36.064415 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="7e8e0a19-95c2-4a0a-8847-e7e2fdf016d3" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Nov 28 11:40:36 crc kubenswrapper[4923]: I1128 11:40:36.064997 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-6tcd6" Nov 28 11:40:36 crc kubenswrapper[4923]: I1128 11:40:36.071066 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 28 11:40:36 crc kubenswrapper[4923]: I1128 11:40:36.071168 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 11:40:36 crc kubenswrapper[4923]: I1128 11:40:36.081630 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-2xnkl" Nov 28 11:40:36 crc kubenswrapper[4923]: I1128 11:40:36.082163 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 28 11:40:36 crc kubenswrapper[4923]: I1128 11:40:36.092929 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-6tcd6"] Nov 28 11:40:36 crc kubenswrapper[4923]: I1128 11:40:36.139307 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nk7g7\" (UniqueName: \"kubernetes.io/projected/e0fd4e3b-71f7-4788-a7b5-6c23adc4cf11-kube-api-access-nk7g7\") pod \"ssh-known-hosts-edpm-deployment-6tcd6\" (UID: \"e0fd4e3b-71f7-4788-a7b5-6c23adc4cf11\") " pod="openstack/ssh-known-hosts-edpm-deployment-6tcd6" Nov 28 11:40:36 crc kubenswrapper[4923]: I1128 11:40:36.139507 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/e0fd4e3b-71f7-4788-a7b5-6c23adc4cf11-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-6tcd6\" (UID: \"e0fd4e3b-71f7-4788-a7b5-6c23adc4cf11\") " pod="openstack/ssh-known-hosts-edpm-deployment-6tcd6" Nov 28 11:40:36 crc kubenswrapper[4923]: I1128 11:40:36.139630 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/e0fd4e3b-71f7-4788-a7b5-6c23adc4cf11-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-6tcd6\" (UID: \"e0fd4e3b-71f7-4788-a7b5-6c23adc4cf11\") " pod="openstack/ssh-known-hosts-edpm-deployment-6tcd6" Nov 28 11:40:36 crc kubenswrapper[4923]: I1128 11:40:36.241117 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/e0fd4e3b-71f7-4788-a7b5-6c23adc4cf11-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-6tcd6\" (UID: \"e0fd4e3b-71f7-4788-a7b5-6c23adc4cf11\") " pod="openstack/ssh-known-hosts-edpm-deployment-6tcd6" Nov 28 11:40:36 crc kubenswrapper[4923]: I1128 11:40:36.242170 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nk7g7\" (UniqueName: \"kubernetes.io/projected/e0fd4e3b-71f7-4788-a7b5-6c23adc4cf11-kube-api-access-nk7g7\") pod \"ssh-known-hosts-edpm-deployment-6tcd6\" (UID: \"e0fd4e3b-71f7-4788-a7b5-6c23adc4cf11\") " pod="openstack/ssh-known-hosts-edpm-deployment-6tcd6" Nov 28 11:40:36 crc kubenswrapper[4923]: I1128 11:40:36.242700 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/e0fd4e3b-71f7-4788-a7b5-6c23adc4cf11-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-6tcd6\" (UID: \"e0fd4e3b-71f7-4788-a7b5-6c23adc4cf11\") " pod="openstack/ssh-known-hosts-edpm-deployment-6tcd6" Nov 28 11:40:36 crc 
kubenswrapper[4923]: I1128 11:40:36.245672 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/e0fd4e3b-71f7-4788-a7b5-6c23adc4cf11-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-6tcd6\" (UID: \"e0fd4e3b-71f7-4788-a7b5-6c23adc4cf11\") " pod="openstack/ssh-known-hosts-edpm-deployment-6tcd6" Nov 28 11:40:36 crc kubenswrapper[4923]: I1128 11:40:36.246535 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/e0fd4e3b-71f7-4788-a7b5-6c23adc4cf11-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-6tcd6\" (UID: \"e0fd4e3b-71f7-4788-a7b5-6c23adc4cf11\") " pod="openstack/ssh-known-hosts-edpm-deployment-6tcd6" Nov 28 11:40:36 crc kubenswrapper[4923]: I1128 11:40:36.266862 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nk7g7\" (UniqueName: \"kubernetes.io/projected/e0fd4e3b-71f7-4788-a7b5-6c23adc4cf11-kube-api-access-nk7g7\") pod \"ssh-known-hosts-edpm-deployment-6tcd6\" (UID: \"e0fd4e3b-71f7-4788-a7b5-6c23adc4cf11\") " pod="openstack/ssh-known-hosts-edpm-deployment-6tcd6" Nov 28 11:40:36 crc kubenswrapper[4923]: I1128 11:40:36.383160 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-6tcd6" Nov 28 11:40:36 crc kubenswrapper[4923]: I1128 11:40:36.941554 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-6tcd6"] Nov 28 11:40:36 crc kubenswrapper[4923]: I1128 11:40:36.971683 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-6tcd6" event={"ID":"e0fd4e3b-71f7-4788-a7b5-6c23adc4cf11","Type":"ContainerStarted","Data":"8856dd7c53ebedd07041f66eed81cda94bb2bfb477c4de2510b2d4f5249035ce"} Nov 28 11:40:37 crc kubenswrapper[4923]: I1128 11:40:37.983750 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-6tcd6" event={"ID":"e0fd4e3b-71f7-4788-a7b5-6c23adc4cf11","Type":"ContainerStarted","Data":"5a7f981536e22e8ff12c48644244409d1cad57cf86c5412cc9773fb30262e51b"} Nov 28 11:40:46 crc kubenswrapper[4923]: I1128 11:40:46.097465 4923 generic.go:334] "Generic (PLEG): container finished" podID="e0fd4e3b-71f7-4788-a7b5-6c23adc4cf11" containerID="5a7f981536e22e8ff12c48644244409d1cad57cf86c5412cc9773fb30262e51b" exitCode=0 Nov 28 11:40:46 crc kubenswrapper[4923]: I1128 11:40:46.097563 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-6tcd6" event={"ID":"e0fd4e3b-71f7-4788-a7b5-6c23adc4cf11","Type":"ContainerDied","Data":"5a7f981536e22e8ff12c48644244409d1cad57cf86c5412cc9773fb30262e51b"} Nov 28 11:40:47 crc kubenswrapper[4923]: I1128 11:40:47.545913 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-6tcd6" Nov 28 11:40:47 crc kubenswrapper[4923]: I1128 11:40:47.578986 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nk7g7\" (UniqueName: \"kubernetes.io/projected/e0fd4e3b-71f7-4788-a7b5-6c23adc4cf11-kube-api-access-nk7g7\") pod \"e0fd4e3b-71f7-4788-a7b5-6c23adc4cf11\" (UID: \"e0fd4e3b-71f7-4788-a7b5-6c23adc4cf11\") " Nov 28 11:40:47 crc kubenswrapper[4923]: I1128 11:40:47.579067 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/e0fd4e3b-71f7-4788-a7b5-6c23adc4cf11-ssh-key-openstack-edpm-ipam\") pod \"e0fd4e3b-71f7-4788-a7b5-6c23adc4cf11\" (UID: \"e0fd4e3b-71f7-4788-a7b5-6c23adc4cf11\") " Nov 28 11:40:47 crc kubenswrapper[4923]: I1128 11:40:47.579222 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/e0fd4e3b-71f7-4788-a7b5-6c23adc4cf11-inventory-0\") pod \"e0fd4e3b-71f7-4788-a7b5-6c23adc4cf11\" (UID: \"e0fd4e3b-71f7-4788-a7b5-6c23adc4cf11\") " Nov 28 11:40:47 crc kubenswrapper[4923]: I1128 11:40:47.597109 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e0fd4e3b-71f7-4788-a7b5-6c23adc4cf11-kube-api-access-nk7g7" (OuterVolumeSpecName: "kube-api-access-nk7g7") pod "e0fd4e3b-71f7-4788-a7b5-6c23adc4cf11" (UID: "e0fd4e3b-71f7-4788-a7b5-6c23adc4cf11"). InnerVolumeSpecName "kube-api-access-nk7g7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:40:47 crc kubenswrapper[4923]: I1128 11:40:47.606737 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e0fd4e3b-71f7-4788-a7b5-6c23adc4cf11-inventory-0" (OuterVolumeSpecName: "inventory-0") pod "e0fd4e3b-71f7-4788-a7b5-6c23adc4cf11" (UID: "e0fd4e3b-71f7-4788-a7b5-6c23adc4cf11"). InnerVolumeSpecName "inventory-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:40:47 crc kubenswrapper[4923]: I1128 11:40:47.633050 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e0fd4e3b-71f7-4788-a7b5-6c23adc4cf11-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "e0fd4e3b-71f7-4788-a7b5-6c23adc4cf11" (UID: "e0fd4e3b-71f7-4788-a7b5-6c23adc4cf11"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:40:47 crc kubenswrapper[4923]: I1128 11:40:47.686779 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nk7g7\" (UniqueName: \"kubernetes.io/projected/e0fd4e3b-71f7-4788-a7b5-6c23adc4cf11-kube-api-access-nk7g7\") on node \"crc\" DevicePath \"\"" Nov 28 11:40:47 crc kubenswrapper[4923]: I1128 11:40:47.686832 4923 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/e0fd4e3b-71f7-4788-a7b5-6c23adc4cf11-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Nov 28 11:40:47 crc kubenswrapper[4923]: I1128 11:40:47.686842 4923 reconciler_common.go:293] "Volume detached for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/e0fd4e3b-71f7-4788-a7b5-6c23adc4cf11-inventory-0\") on node \"crc\" DevicePath \"\"" Nov 28 11:40:48 crc kubenswrapper[4923]: I1128 11:40:48.126596 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-6tcd6" event={"ID":"e0fd4e3b-71f7-4788-a7b5-6c23adc4cf11","Type":"ContainerDied","Data":"8856dd7c53ebedd07041f66eed81cda94bb2bfb477c4de2510b2d4f5249035ce"} Nov 28 11:40:48 crc kubenswrapper[4923]: I1128 11:40:48.126664 4923 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8856dd7c53ebedd07041f66eed81cda94bb2bfb477c4de2510b2d4f5249035ce" Nov 28 11:40:48 crc kubenswrapper[4923]: I1128 11:40:48.126743 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-6tcd6" Nov 28 11:40:48 crc kubenswrapper[4923]: I1128 11:40:48.212189 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-mtsmh"] Nov 28 11:40:48 crc kubenswrapper[4923]: E1128 11:40:48.212768 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e0fd4e3b-71f7-4788-a7b5-6c23adc4cf11" containerName="ssh-known-hosts-edpm-deployment" Nov 28 11:40:48 crc kubenswrapper[4923]: I1128 11:40:48.212786 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="e0fd4e3b-71f7-4788-a7b5-6c23adc4cf11" containerName="ssh-known-hosts-edpm-deployment" Nov 28 11:40:48 crc kubenswrapper[4923]: I1128 11:40:48.212950 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="e0fd4e3b-71f7-4788-a7b5-6c23adc4cf11" containerName="ssh-known-hosts-edpm-deployment" Nov 28 11:40:48 crc kubenswrapper[4923]: I1128 11:40:48.213532 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-mtsmh" Nov 28 11:40:48 crc kubenswrapper[4923]: I1128 11:40:48.216062 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-2xnkl" Nov 28 11:40:48 crc kubenswrapper[4923]: I1128 11:40:48.216339 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 28 11:40:48 crc kubenswrapper[4923]: I1128 11:40:48.217091 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 28 11:40:48 crc kubenswrapper[4923]: I1128 11:40:48.217357 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 11:40:48 crc kubenswrapper[4923]: I1128 11:40:48.235560 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-mtsmh"] Nov 28 11:40:48 crc kubenswrapper[4923]: I1128 11:40:48.298330 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mh88v\" (UniqueName: \"kubernetes.io/projected/97a4f32b-623f-4eda-9418-9edc6c64a043-kube-api-access-mh88v\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-mtsmh\" (UID: \"97a4f32b-623f-4eda-9418-9edc6c64a043\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-mtsmh" Nov 28 11:40:48 crc kubenswrapper[4923]: I1128 11:40:48.298423 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/97a4f32b-623f-4eda-9418-9edc6c64a043-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-mtsmh\" (UID: \"97a4f32b-623f-4eda-9418-9edc6c64a043\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-mtsmh" Nov 28 11:40:48 crc kubenswrapper[4923]: I1128 11:40:48.298608 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/97a4f32b-623f-4eda-9418-9edc6c64a043-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-mtsmh\" (UID: \"97a4f32b-623f-4eda-9418-9edc6c64a043\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-mtsmh" Nov 28 11:40:48 crc kubenswrapper[4923]: I1128 11:40:48.400601 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/97a4f32b-623f-4eda-9418-9edc6c64a043-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-mtsmh\" (UID: \"97a4f32b-623f-4eda-9418-9edc6c64a043\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-mtsmh" Nov 28 11:40:48 crc kubenswrapper[4923]: I1128 11:40:48.400721 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mh88v\" (UniqueName: \"kubernetes.io/projected/97a4f32b-623f-4eda-9418-9edc6c64a043-kube-api-access-mh88v\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-mtsmh\" (UID: \"97a4f32b-623f-4eda-9418-9edc6c64a043\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-mtsmh" Nov 28 11:40:48 crc kubenswrapper[4923]: I1128 11:40:48.400803 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/97a4f32b-623f-4eda-9418-9edc6c64a043-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-mtsmh\" (UID: \"97a4f32b-623f-4eda-9418-9edc6c64a043\") " 
pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-mtsmh" Nov 28 11:40:48 crc kubenswrapper[4923]: I1128 11:40:48.406691 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/97a4f32b-623f-4eda-9418-9edc6c64a043-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-mtsmh\" (UID: \"97a4f32b-623f-4eda-9418-9edc6c64a043\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-mtsmh" Nov 28 11:40:48 crc kubenswrapper[4923]: I1128 11:40:48.407458 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/97a4f32b-623f-4eda-9418-9edc6c64a043-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-mtsmh\" (UID: \"97a4f32b-623f-4eda-9418-9edc6c64a043\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-mtsmh" Nov 28 11:40:48 crc kubenswrapper[4923]: I1128 11:40:48.428844 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mh88v\" (UniqueName: \"kubernetes.io/projected/97a4f32b-623f-4eda-9418-9edc6c64a043-kube-api-access-mh88v\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-mtsmh\" (UID: \"97a4f32b-623f-4eda-9418-9edc6c64a043\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-mtsmh" Nov 28 11:40:48 crc kubenswrapper[4923]: I1128 11:40:48.529155 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-mtsmh" Nov 28 11:40:49 crc kubenswrapper[4923]: I1128 11:40:49.047173 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-z2zwx"] Nov 28 11:40:49 crc kubenswrapper[4923]: I1128 11:40:49.055621 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-z2zwx"] Nov 28 11:40:49 crc kubenswrapper[4923]: I1128 11:40:49.085343 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-mtsmh"] Nov 28 11:40:49 crc kubenswrapper[4923]: I1128 11:40:49.134514 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-mtsmh" event={"ID":"97a4f32b-623f-4eda-9418-9edc6c64a043","Type":"ContainerStarted","Data":"3e33a981ea6e0a18984b99845c3c8d86614ebed7420260c0bca4211d71b91954"} Nov 28 11:40:49 crc kubenswrapper[4923]: I1128 11:40:49.177305 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="327ab34b-b6a6-4e7d-be85-933e09902a9a" path="/var/lib/kubelet/pods/327ab34b-b6a6-4e7d-be85-933e09902a9a/volumes" Nov 28 11:40:49 crc kubenswrapper[4923]: I1128 11:40:49.384015 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-4dvck"] Nov 28 11:40:49 crc kubenswrapper[4923]: I1128 11:40:49.386729 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-4dvck" Nov 28 11:40:49 crc kubenswrapper[4923]: I1128 11:40:49.393470 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-4dvck"] Nov 28 11:40:49 crc kubenswrapper[4923]: I1128 11:40:49.420140 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/de618ad7-8216-4a56-b312-79e747eb9fb4-utilities\") pod \"redhat-operators-4dvck\" (UID: \"de618ad7-8216-4a56-b312-79e747eb9fb4\") " pod="openshift-marketplace/redhat-operators-4dvck" Nov 28 11:40:49 crc kubenswrapper[4923]: I1128 11:40:49.420232 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5bbs8\" (UniqueName: \"kubernetes.io/projected/de618ad7-8216-4a56-b312-79e747eb9fb4-kube-api-access-5bbs8\") pod \"redhat-operators-4dvck\" (UID: \"de618ad7-8216-4a56-b312-79e747eb9fb4\") " pod="openshift-marketplace/redhat-operators-4dvck" Nov 28 11:40:49 crc kubenswrapper[4923]: I1128 11:40:49.420357 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/de618ad7-8216-4a56-b312-79e747eb9fb4-catalog-content\") pod \"redhat-operators-4dvck\" (UID: \"de618ad7-8216-4a56-b312-79e747eb9fb4\") " pod="openshift-marketplace/redhat-operators-4dvck" Nov 28 11:40:49 crc kubenswrapper[4923]: I1128 11:40:49.521649 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/de618ad7-8216-4a56-b312-79e747eb9fb4-catalog-content\") pod \"redhat-operators-4dvck\" (UID: \"de618ad7-8216-4a56-b312-79e747eb9fb4\") " pod="openshift-marketplace/redhat-operators-4dvck" Nov 28 11:40:49 crc kubenswrapper[4923]: I1128 11:40:49.521714 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/de618ad7-8216-4a56-b312-79e747eb9fb4-utilities\") pod \"redhat-operators-4dvck\" (UID: \"de618ad7-8216-4a56-b312-79e747eb9fb4\") " pod="openshift-marketplace/redhat-operators-4dvck" Nov 28 11:40:49 crc kubenswrapper[4923]: I1128 11:40:49.521759 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5bbs8\" (UniqueName: \"kubernetes.io/projected/de618ad7-8216-4a56-b312-79e747eb9fb4-kube-api-access-5bbs8\") pod \"redhat-operators-4dvck\" (UID: \"de618ad7-8216-4a56-b312-79e747eb9fb4\") " pod="openshift-marketplace/redhat-operators-4dvck" Nov 28 11:40:49 crc kubenswrapper[4923]: I1128 11:40:49.522469 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/de618ad7-8216-4a56-b312-79e747eb9fb4-catalog-content\") pod \"redhat-operators-4dvck\" (UID: \"de618ad7-8216-4a56-b312-79e747eb9fb4\") " pod="openshift-marketplace/redhat-operators-4dvck" Nov 28 11:40:49 crc kubenswrapper[4923]: I1128 11:40:49.522678 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/de618ad7-8216-4a56-b312-79e747eb9fb4-utilities\") pod \"redhat-operators-4dvck\" (UID: \"de618ad7-8216-4a56-b312-79e747eb9fb4\") " pod="openshift-marketplace/redhat-operators-4dvck" Nov 28 11:40:49 crc kubenswrapper[4923]: I1128 11:40:49.544815 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-5bbs8\" (UniqueName: \"kubernetes.io/projected/de618ad7-8216-4a56-b312-79e747eb9fb4-kube-api-access-5bbs8\") pod \"redhat-operators-4dvck\" (UID: \"de618ad7-8216-4a56-b312-79e747eb9fb4\") " pod="openshift-marketplace/redhat-operators-4dvck" Nov 28 11:40:49 crc kubenswrapper[4923]: I1128 11:40:49.709001 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-4dvck" Nov 28 11:40:50 crc kubenswrapper[4923]: I1128 11:40:50.142682 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-mtsmh" event={"ID":"97a4f32b-623f-4eda-9418-9edc6c64a043","Type":"ContainerStarted","Data":"d1337c3a62f436a45f03043d2f64cffbe23ae3cc98f40c319864a166eb9e4701"} Nov 28 11:40:50 crc kubenswrapper[4923]: I1128 11:40:50.161001 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-mtsmh" podStartSLOduration=1.67342909 podStartE2EDuration="2.160981366s" podCreationTimestamp="2025-11-28 11:40:48 +0000 UTC" firstStartedPulling="2025-11-28 11:40:49.089720753 +0000 UTC m=+1928.218404963" lastFinishedPulling="2025-11-28 11:40:49.577273019 +0000 UTC m=+1928.705957239" observedRunningTime="2025-11-28 11:40:50.155159681 +0000 UTC m=+1929.283843901" watchObservedRunningTime="2025-11-28 11:40:50.160981366 +0000 UTC m=+1929.289665576" Nov 28 11:40:50 crc kubenswrapper[4923]: I1128 11:40:50.182990 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-4dvck"] Nov 28 11:40:50 crc kubenswrapper[4923]: W1128 11:40:50.192344 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podde618ad7_8216_4a56_b312_79e747eb9fb4.slice/crio-3dd4f5f72db3d802309178fcfa73c370ca3729bbae470ef0563dfe6dea3abc5a WatchSource:0}: Error finding container 3dd4f5f72db3d802309178fcfa73c370ca3729bbae470ef0563dfe6dea3abc5a: Status 404 returned error can't find the container with id 3dd4f5f72db3d802309178fcfa73c370ca3729bbae470ef0563dfe6dea3abc5a Nov 28 11:40:51 crc kubenswrapper[4923]: I1128 11:40:51.150038 4923 generic.go:334] "Generic (PLEG): container finished" podID="de618ad7-8216-4a56-b312-79e747eb9fb4" containerID="b4d78f592c1609b59d068da090ac01c02dcd95599480b1a871cadd44ee96af5e" exitCode=0 Nov 28 11:40:51 crc kubenswrapper[4923]: I1128 11:40:51.150137 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4dvck" event={"ID":"de618ad7-8216-4a56-b312-79e747eb9fb4","Type":"ContainerDied","Data":"b4d78f592c1609b59d068da090ac01c02dcd95599480b1a871cadd44ee96af5e"} Nov 28 11:40:51 crc kubenswrapper[4923]: I1128 11:40:51.150409 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4dvck" event={"ID":"de618ad7-8216-4a56-b312-79e747eb9fb4","Type":"ContainerStarted","Data":"3dd4f5f72db3d802309178fcfa73c370ca3729bbae470ef0563dfe6dea3abc5a"} Nov 28 11:40:53 crc kubenswrapper[4923]: I1128 11:40:53.182272 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4dvck" event={"ID":"de618ad7-8216-4a56-b312-79e747eb9fb4","Type":"ContainerStarted","Data":"0cfc59ce8560796e11ea18e2453a79f3dcd9cde1ae228db8a694ae33770d08c9"} Nov 28 11:41:00 crc kubenswrapper[4923]: I1128 11:41:00.247175 4923 generic.go:334] "Generic (PLEG): container finished" podID="97a4f32b-623f-4eda-9418-9edc6c64a043" 
containerID="d1337c3a62f436a45f03043d2f64cffbe23ae3cc98f40c319864a166eb9e4701" exitCode=0 Nov 28 11:41:00 crc kubenswrapper[4923]: I1128 11:41:00.247328 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-mtsmh" event={"ID":"97a4f32b-623f-4eda-9418-9edc6c64a043","Type":"ContainerDied","Data":"d1337c3a62f436a45f03043d2f64cffbe23ae3cc98f40c319864a166eb9e4701"} Nov 28 11:41:00 crc kubenswrapper[4923]: I1128 11:41:00.252049 4923 generic.go:334] "Generic (PLEG): container finished" podID="de618ad7-8216-4a56-b312-79e747eb9fb4" containerID="0cfc59ce8560796e11ea18e2453a79f3dcd9cde1ae228db8a694ae33770d08c9" exitCode=0 Nov 28 11:41:00 crc kubenswrapper[4923]: I1128 11:41:00.252089 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4dvck" event={"ID":"de618ad7-8216-4a56-b312-79e747eb9fb4","Type":"ContainerDied","Data":"0cfc59ce8560796e11ea18e2453a79f3dcd9cde1ae228db8a694ae33770d08c9"} Nov 28 11:41:00 crc kubenswrapper[4923]: I1128 11:41:00.255158 4923 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 11:41:01 crc kubenswrapper[4923]: I1128 11:41:01.266337 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4dvck" event={"ID":"de618ad7-8216-4a56-b312-79e747eb9fb4","Type":"ContainerStarted","Data":"20b1147b50d4e2d29a3b51192e2112defc4b6385ee1e67b129584639630a2147"} Nov 28 11:41:01 crc kubenswrapper[4923]: I1128 11:41:01.294450 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-4dvck" podStartSLOduration=2.686906171 podStartE2EDuration="12.294423778s" podCreationTimestamp="2025-11-28 11:40:49 +0000 UTC" firstStartedPulling="2025-11-28 11:40:51.152027621 +0000 UTC m=+1930.280711831" lastFinishedPulling="2025-11-28 11:41:00.759545228 +0000 UTC m=+1939.888229438" observedRunningTime="2025-11-28 11:41:01.287140852 +0000 UTC m=+1940.415825082" watchObservedRunningTime="2025-11-28 11:41:01.294423778 +0000 UTC m=+1940.423108038" Nov 28 11:41:01 crc kubenswrapper[4923]: I1128 11:41:01.727478 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-mtsmh" Nov 28 11:41:01 crc kubenswrapper[4923]: I1128 11:41:01.766989 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/97a4f32b-623f-4eda-9418-9edc6c64a043-ssh-key\") pod \"97a4f32b-623f-4eda-9418-9edc6c64a043\" (UID: \"97a4f32b-623f-4eda-9418-9edc6c64a043\") " Nov 28 11:41:01 crc kubenswrapper[4923]: I1128 11:41:01.767038 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mh88v\" (UniqueName: \"kubernetes.io/projected/97a4f32b-623f-4eda-9418-9edc6c64a043-kube-api-access-mh88v\") pod \"97a4f32b-623f-4eda-9418-9edc6c64a043\" (UID: \"97a4f32b-623f-4eda-9418-9edc6c64a043\") " Nov 28 11:41:01 crc kubenswrapper[4923]: I1128 11:41:01.767149 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/97a4f32b-623f-4eda-9418-9edc6c64a043-inventory\") pod \"97a4f32b-623f-4eda-9418-9edc6c64a043\" (UID: \"97a4f32b-623f-4eda-9418-9edc6c64a043\") " Nov 28 11:41:01 crc kubenswrapper[4923]: I1128 11:41:01.800279 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/97a4f32b-623f-4eda-9418-9edc6c64a043-kube-api-access-mh88v" (OuterVolumeSpecName: "kube-api-access-mh88v") pod "97a4f32b-623f-4eda-9418-9edc6c64a043" (UID: "97a4f32b-623f-4eda-9418-9edc6c64a043"). InnerVolumeSpecName "kube-api-access-mh88v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:41:01 crc kubenswrapper[4923]: I1128 11:41:01.803792 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/97a4f32b-623f-4eda-9418-9edc6c64a043-inventory" (OuterVolumeSpecName: "inventory") pod "97a4f32b-623f-4eda-9418-9edc6c64a043" (UID: "97a4f32b-623f-4eda-9418-9edc6c64a043"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:41:01 crc kubenswrapper[4923]: I1128 11:41:01.828065 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/97a4f32b-623f-4eda-9418-9edc6c64a043-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "97a4f32b-623f-4eda-9418-9edc6c64a043" (UID: "97a4f32b-623f-4eda-9418-9edc6c64a043"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:41:01 crc kubenswrapper[4923]: I1128 11:41:01.869273 4923 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/97a4f32b-623f-4eda-9418-9edc6c64a043-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 11:41:01 crc kubenswrapper[4923]: I1128 11:41:01.869315 4923 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/97a4f32b-623f-4eda-9418-9edc6c64a043-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 11:41:01 crc kubenswrapper[4923]: I1128 11:41:01.869329 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mh88v\" (UniqueName: \"kubernetes.io/projected/97a4f32b-623f-4eda-9418-9edc6c64a043-kube-api-access-mh88v\") on node \"crc\" DevicePath \"\"" Nov 28 11:41:02 crc kubenswrapper[4923]: I1128 11:41:02.291512 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-mtsmh" event={"ID":"97a4f32b-623f-4eda-9418-9edc6c64a043","Type":"ContainerDied","Data":"3e33a981ea6e0a18984b99845c3c8d86614ebed7420260c0bca4211d71b91954"} Nov 28 11:41:02 crc kubenswrapper[4923]: I1128 11:41:02.291703 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-mtsmh" Nov 28 11:41:02 crc kubenswrapper[4923]: I1128 11:41:02.296199 4923 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3e33a981ea6e0a18984b99845c3c8d86614ebed7420260c0bca4211d71b91954" Nov 28 11:41:02 crc kubenswrapper[4923]: I1128 11:41:02.356730 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-c6dbl"] Nov 28 11:41:02 crc kubenswrapper[4923]: E1128 11:41:02.358702 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="97a4f32b-623f-4eda-9418-9edc6c64a043" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 28 11:41:02 crc kubenswrapper[4923]: I1128 11:41:02.358725 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="97a4f32b-623f-4eda-9418-9edc6c64a043" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 28 11:41:02 crc kubenswrapper[4923]: I1128 11:41:02.358927 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="97a4f32b-623f-4eda-9418-9edc6c64a043" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 28 11:41:02 crc kubenswrapper[4923]: I1128 11:41:02.359487 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-c6dbl" Nov 28 11:41:02 crc kubenswrapper[4923]: I1128 11:41:02.361915 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 28 11:41:02 crc kubenswrapper[4923]: I1128 11:41:02.362282 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-2xnkl" Nov 28 11:41:02 crc kubenswrapper[4923]: I1128 11:41:02.362495 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 28 11:41:02 crc kubenswrapper[4923]: I1128 11:41:02.369277 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-c6dbl"] Nov 28 11:41:02 crc kubenswrapper[4923]: I1128 11:41:02.376472 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 11:41:02 crc kubenswrapper[4923]: I1128 11:41:02.378735 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0577aa66-e25a-4198-84a3-db8becccbbf6-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-c6dbl\" (UID: \"0577aa66-e25a-4198-84a3-db8becccbbf6\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-c6dbl" Nov 28 11:41:02 crc kubenswrapper[4923]: I1128 11:41:02.379575 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lfgpg\" (UniqueName: \"kubernetes.io/projected/0577aa66-e25a-4198-84a3-db8becccbbf6-kube-api-access-lfgpg\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-c6dbl\" (UID: \"0577aa66-e25a-4198-84a3-db8becccbbf6\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-c6dbl" Nov 28 11:41:02 crc kubenswrapper[4923]: I1128 11:41:02.381895 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0577aa66-e25a-4198-84a3-db8becccbbf6-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-c6dbl\" (UID: \"0577aa66-e25a-4198-84a3-db8becccbbf6\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-c6dbl" Nov 28 11:41:02 crc kubenswrapper[4923]: I1128 11:41:02.483993 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0577aa66-e25a-4198-84a3-db8becccbbf6-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-c6dbl\" (UID: \"0577aa66-e25a-4198-84a3-db8becccbbf6\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-c6dbl" Nov 28 11:41:02 crc kubenswrapper[4923]: I1128 11:41:02.484418 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lfgpg\" (UniqueName: \"kubernetes.io/projected/0577aa66-e25a-4198-84a3-db8becccbbf6-kube-api-access-lfgpg\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-c6dbl\" (UID: \"0577aa66-e25a-4198-84a3-db8becccbbf6\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-c6dbl" Nov 28 11:41:02 crc kubenswrapper[4923]: I1128 11:41:02.484637 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0577aa66-e25a-4198-84a3-db8becccbbf6-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-c6dbl\" (UID: 
\"0577aa66-e25a-4198-84a3-db8becccbbf6\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-c6dbl" Nov 28 11:41:02 crc kubenswrapper[4923]: I1128 11:41:02.489900 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0577aa66-e25a-4198-84a3-db8becccbbf6-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-c6dbl\" (UID: \"0577aa66-e25a-4198-84a3-db8becccbbf6\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-c6dbl" Nov 28 11:41:02 crc kubenswrapper[4923]: I1128 11:41:02.499767 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0577aa66-e25a-4198-84a3-db8becccbbf6-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-c6dbl\" (UID: \"0577aa66-e25a-4198-84a3-db8becccbbf6\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-c6dbl" Nov 28 11:41:02 crc kubenswrapper[4923]: I1128 11:41:02.513358 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lfgpg\" (UniqueName: \"kubernetes.io/projected/0577aa66-e25a-4198-84a3-db8becccbbf6-kube-api-access-lfgpg\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-c6dbl\" (UID: \"0577aa66-e25a-4198-84a3-db8becccbbf6\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-c6dbl" Nov 28 11:41:02 crc kubenswrapper[4923]: I1128 11:41:02.684988 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-c6dbl" Nov 28 11:41:03 crc kubenswrapper[4923]: I1128 11:41:03.015477 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-c6dbl"] Nov 28 11:41:03 crc kubenswrapper[4923]: I1128 11:41:03.302191 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-c6dbl" event={"ID":"0577aa66-e25a-4198-84a3-db8becccbbf6","Type":"ContainerStarted","Data":"637016b4daf1e8fdbb4fa87dc159a32d0d071d50df9f74714a8411d45897d24f"} Nov 28 11:41:04 crc kubenswrapper[4923]: I1128 11:41:04.311796 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-c6dbl" event={"ID":"0577aa66-e25a-4198-84a3-db8becccbbf6","Type":"ContainerStarted","Data":"37284582d340124422ef7675b12745a66534efd48defee850bc9ad88ecbd138b"} Nov 28 11:41:04 crc kubenswrapper[4923]: I1128 11:41:04.334297 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-c6dbl" podStartSLOduration=1.883756273 podStartE2EDuration="2.334281528s" podCreationTimestamp="2025-11-28 11:41:02 +0000 UTC" firstStartedPulling="2025-11-28 11:41:03.026108936 +0000 UTC m=+1942.154793146" lastFinishedPulling="2025-11-28 11:41:03.476634191 +0000 UTC m=+1942.605318401" observedRunningTime="2025-11-28 11:41:04.331373455 +0000 UTC m=+1943.460057685" watchObservedRunningTime="2025-11-28 11:41:04.334281528 +0000 UTC m=+1943.462965738" Nov 28 11:41:08 crc kubenswrapper[4923]: I1128 11:41:08.135038 4923 scope.go:117] "RemoveContainer" containerID="c96f5654fa63d25dd422d3bae27f4ce71673d7e8b016ec7225a9b8070f0e5bf8" Nov 28 11:41:09 crc kubenswrapper[4923]: I1128 11:41:09.710351 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-4dvck" Nov 28 11:41:09 crc kubenswrapper[4923]: I1128 11:41:09.710758 4923 kubelet.go:2542] 
"SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-4dvck" Nov 28 11:41:09 crc kubenswrapper[4923]: I1128 11:41:09.758413 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-4dvck" Nov 28 11:41:10 crc kubenswrapper[4923]: I1128 11:41:10.444728 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-4dvck" Nov 28 11:41:10 crc kubenswrapper[4923]: I1128 11:41:10.505866 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-4dvck"] Nov 28 11:41:12 crc kubenswrapper[4923]: I1128 11:41:12.384375 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-4dvck" podUID="de618ad7-8216-4a56-b312-79e747eb9fb4" containerName="registry-server" containerID="cri-o://20b1147b50d4e2d29a3b51192e2112defc4b6385ee1e67b129584639630a2147" gracePeriod=2 Nov 28 11:41:12 crc kubenswrapper[4923]: I1128 11:41:12.865728 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-4dvck" Nov 28 11:41:13 crc kubenswrapper[4923]: I1128 11:41:13.001579 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/de618ad7-8216-4a56-b312-79e747eb9fb4-utilities\") pod \"de618ad7-8216-4a56-b312-79e747eb9fb4\" (UID: \"de618ad7-8216-4a56-b312-79e747eb9fb4\") " Nov 28 11:41:13 crc kubenswrapper[4923]: I1128 11:41:13.002014 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/de618ad7-8216-4a56-b312-79e747eb9fb4-catalog-content\") pod \"de618ad7-8216-4a56-b312-79e747eb9fb4\" (UID: \"de618ad7-8216-4a56-b312-79e747eb9fb4\") " Nov 28 11:41:13 crc kubenswrapper[4923]: I1128 11:41:13.002581 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/de618ad7-8216-4a56-b312-79e747eb9fb4-utilities" (OuterVolumeSpecName: "utilities") pod "de618ad7-8216-4a56-b312-79e747eb9fb4" (UID: "de618ad7-8216-4a56-b312-79e747eb9fb4"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:41:13 crc kubenswrapper[4923]: I1128 11:41:13.015979 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5bbs8\" (UniqueName: \"kubernetes.io/projected/de618ad7-8216-4a56-b312-79e747eb9fb4-kube-api-access-5bbs8\") pod \"de618ad7-8216-4a56-b312-79e747eb9fb4\" (UID: \"de618ad7-8216-4a56-b312-79e747eb9fb4\") " Nov 28 11:41:13 crc kubenswrapper[4923]: I1128 11:41:13.016644 4923 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/de618ad7-8216-4a56-b312-79e747eb9fb4-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 11:41:13 crc kubenswrapper[4923]: I1128 11:41:13.021175 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/de618ad7-8216-4a56-b312-79e747eb9fb4-kube-api-access-5bbs8" (OuterVolumeSpecName: "kube-api-access-5bbs8") pod "de618ad7-8216-4a56-b312-79e747eb9fb4" (UID: "de618ad7-8216-4a56-b312-79e747eb9fb4"). InnerVolumeSpecName "kube-api-access-5bbs8". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:41:13 crc kubenswrapper[4923]: I1128 11:41:13.119613 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5bbs8\" (UniqueName: \"kubernetes.io/projected/de618ad7-8216-4a56-b312-79e747eb9fb4-kube-api-access-5bbs8\") on node \"crc\" DevicePath \"\"" Nov 28 11:41:13 crc kubenswrapper[4923]: I1128 11:41:13.127447 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/de618ad7-8216-4a56-b312-79e747eb9fb4-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "de618ad7-8216-4a56-b312-79e747eb9fb4" (UID: "de618ad7-8216-4a56-b312-79e747eb9fb4"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:41:13 crc kubenswrapper[4923]: I1128 11:41:13.220952 4923 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/de618ad7-8216-4a56-b312-79e747eb9fb4-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 11:41:13 crc kubenswrapper[4923]: I1128 11:41:13.393729 4923 generic.go:334] "Generic (PLEG): container finished" podID="de618ad7-8216-4a56-b312-79e747eb9fb4" containerID="20b1147b50d4e2d29a3b51192e2112defc4b6385ee1e67b129584639630a2147" exitCode=0 Nov 28 11:41:13 crc kubenswrapper[4923]: I1128 11:41:13.393786 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4dvck" event={"ID":"de618ad7-8216-4a56-b312-79e747eb9fb4","Type":"ContainerDied","Data":"20b1147b50d4e2d29a3b51192e2112defc4b6385ee1e67b129584639630a2147"} Nov 28 11:41:13 crc kubenswrapper[4923]: I1128 11:41:13.393815 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4dvck" event={"ID":"de618ad7-8216-4a56-b312-79e747eb9fb4","Type":"ContainerDied","Data":"3dd4f5f72db3d802309178fcfa73c370ca3729bbae470ef0563dfe6dea3abc5a"} Nov 28 11:41:13 crc kubenswrapper[4923]: I1128 11:41:13.393833 4923 scope.go:117] "RemoveContainer" containerID="20b1147b50d4e2d29a3b51192e2112defc4b6385ee1e67b129584639630a2147" Nov 28 11:41:13 crc kubenswrapper[4923]: I1128 11:41:13.394651 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-4dvck" Nov 28 11:41:13 crc kubenswrapper[4923]: I1128 11:41:13.415905 4923 scope.go:117] "RemoveContainer" containerID="0cfc59ce8560796e11ea18e2453a79f3dcd9cde1ae228db8a694ae33770d08c9" Nov 28 11:41:13 crc kubenswrapper[4923]: I1128 11:41:13.417926 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-4dvck"] Nov 28 11:41:13 crc kubenswrapper[4923]: I1128 11:41:13.429271 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-4dvck"] Nov 28 11:41:13 crc kubenswrapper[4923]: I1128 11:41:13.475312 4923 scope.go:117] "RemoveContainer" containerID="b4d78f592c1609b59d068da090ac01c02dcd95599480b1a871cadd44ee96af5e" Nov 28 11:41:13 crc kubenswrapper[4923]: I1128 11:41:13.493094 4923 scope.go:117] "RemoveContainer" containerID="20b1147b50d4e2d29a3b51192e2112defc4b6385ee1e67b129584639630a2147" Nov 28 11:41:13 crc kubenswrapper[4923]: E1128 11:41:13.493670 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"20b1147b50d4e2d29a3b51192e2112defc4b6385ee1e67b129584639630a2147\": container with ID starting with 20b1147b50d4e2d29a3b51192e2112defc4b6385ee1e67b129584639630a2147 not found: ID does not exist" containerID="20b1147b50d4e2d29a3b51192e2112defc4b6385ee1e67b129584639630a2147" Nov 28 11:41:13 crc kubenswrapper[4923]: I1128 11:41:13.493702 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"20b1147b50d4e2d29a3b51192e2112defc4b6385ee1e67b129584639630a2147"} err="failed to get container status \"20b1147b50d4e2d29a3b51192e2112defc4b6385ee1e67b129584639630a2147\": rpc error: code = NotFound desc = could not find container \"20b1147b50d4e2d29a3b51192e2112defc4b6385ee1e67b129584639630a2147\": container with ID starting with 20b1147b50d4e2d29a3b51192e2112defc4b6385ee1e67b129584639630a2147 not found: ID does not exist" Nov 28 11:41:13 crc kubenswrapper[4923]: I1128 11:41:13.493730 4923 scope.go:117] "RemoveContainer" containerID="0cfc59ce8560796e11ea18e2453a79f3dcd9cde1ae228db8a694ae33770d08c9" Nov 28 11:41:13 crc kubenswrapper[4923]: E1128 11:41:13.494129 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0cfc59ce8560796e11ea18e2453a79f3dcd9cde1ae228db8a694ae33770d08c9\": container with ID starting with 0cfc59ce8560796e11ea18e2453a79f3dcd9cde1ae228db8a694ae33770d08c9 not found: ID does not exist" containerID="0cfc59ce8560796e11ea18e2453a79f3dcd9cde1ae228db8a694ae33770d08c9" Nov 28 11:41:13 crc kubenswrapper[4923]: I1128 11:41:13.494177 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0cfc59ce8560796e11ea18e2453a79f3dcd9cde1ae228db8a694ae33770d08c9"} err="failed to get container status \"0cfc59ce8560796e11ea18e2453a79f3dcd9cde1ae228db8a694ae33770d08c9\": rpc error: code = NotFound desc = could not find container \"0cfc59ce8560796e11ea18e2453a79f3dcd9cde1ae228db8a694ae33770d08c9\": container with ID starting with 0cfc59ce8560796e11ea18e2453a79f3dcd9cde1ae228db8a694ae33770d08c9 not found: ID does not exist" Nov 28 11:41:13 crc kubenswrapper[4923]: I1128 11:41:13.494211 4923 scope.go:117] "RemoveContainer" containerID="b4d78f592c1609b59d068da090ac01c02dcd95599480b1a871cadd44ee96af5e" Nov 28 11:41:13 crc kubenswrapper[4923]: E1128 11:41:13.494872 4923 log.go:32] "ContainerStatus from runtime service failed" 
err="rpc error: code = NotFound desc = could not find container \"b4d78f592c1609b59d068da090ac01c02dcd95599480b1a871cadd44ee96af5e\": container with ID starting with b4d78f592c1609b59d068da090ac01c02dcd95599480b1a871cadd44ee96af5e not found: ID does not exist" containerID="b4d78f592c1609b59d068da090ac01c02dcd95599480b1a871cadd44ee96af5e" Nov 28 11:41:13 crc kubenswrapper[4923]: I1128 11:41:13.494925 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b4d78f592c1609b59d068da090ac01c02dcd95599480b1a871cadd44ee96af5e"} err="failed to get container status \"b4d78f592c1609b59d068da090ac01c02dcd95599480b1a871cadd44ee96af5e\": rpc error: code = NotFound desc = could not find container \"b4d78f592c1609b59d068da090ac01c02dcd95599480b1a871cadd44ee96af5e\": container with ID starting with b4d78f592c1609b59d068da090ac01c02dcd95599480b1a871cadd44ee96af5e not found: ID does not exist" Nov 28 11:41:14 crc kubenswrapper[4923]: I1128 11:41:14.404325 4923 generic.go:334] "Generic (PLEG): container finished" podID="0577aa66-e25a-4198-84a3-db8becccbbf6" containerID="37284582d340124422ef7675b12745a66534efd48defee850bc9ad88ecbd138b" exitCode=0 Nov 28 11:41:14 crc kubenswrapper[4923]: I1128 11:41:14.404399 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-c6dbl" event={"ID":"0577aa66-e25a-4198-84a3-db8becccbbf6","Type":"ContainerDied","Data":"37284582d340124422ef7675b12745a66534efd48defee850bc9ad88ecbd138b"} Nov 28 11:41:15 crc kubenswrapper[4923]: I1128 11:41:15.182538 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="de618ad7-8216-4a56-b312-79e747eb9fb4" path="/var/lib/kubelet/pods/de618ad7-8216-4a56-b312-79e747eb9fb4/volumes" Nov 28 11:41:15 crc kubenswrapper[4923]: I1128 11:41:15.847746 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-c6dbl" Nov 28 11:41:15 crc kubenswrapper[4923]: I1128 11:41:15.970623 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0577aa66-e25a-4198-84a3-db8becccbbf6-ssh-key\") pod \"0577aa66-e25a-4198-84a3-db8becccbbf6\" (UID: \"0577aa66-e25a-4198-84a3-db8becccbbf6\") " Nov 28 11:41:15 crc kubenswrapper[4923]: I1128 11:41:15.970686 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0577aa66-e25a-4198-84a3-db8becccbbf6-inventory\") pod \"0577aa66-e25a-4198-84a3-db8becccbbf6\" (UID: \"0577aa66-e25a-4198-84a3-db8becccbbf6\") " Nov 28 11:41:15 crc kubenswrapper[4923]: I1128 11:41:15.970756 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lfgpg\" (UniqueName: \"kubernetes.io/projected/0577aa66-e25a-4198-84a3-db8becccbbf6-kube-api-access-lfgpg\") pod \"0577aa66-e25a-4198-84a3-db8becccbbf6\" (UID: \"0577aa66-e25a-4198-84a3-db8becccbbf6\") " Nov 28 11:41:15 crc kubenswrapper[4923]: I1128 11:41:15.979744 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0577aa66-e25a-4198-84a3-db8becccbbf6-kube-api-access-lfgpg" (OuterVolumeSpecName: "kube-api-access-lfgpg") pod "0577aa66-e25a-4198-84a3-db8becccbbf6" (UID: "0577aa66-e25a-4198-84a3-db8becccbbf6"). InnerVolumeSpecName "kube-api-access-lfgpg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:41:15 crc kubenswrapper[4923]: I1128 11:41:15.997663 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0577aa66-e25a-4198-84a3-db8becccbbf6-inventory" (OuterVolumeSpecName: "inventory") pod "0577aa66-e25a-4198-84a3-db8becccbbf6" (UID: "0577aa66-e25a-4198-84a3-db8becccbbf6"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:41:16 crc kubenswrapper[4923]: I1128 11:41:16.014245 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0577aa66-e25a-4198-84a3-db8becccbbf6-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "0577aa66-e25a-4198-84a3-db8becccbbf6" (UID: "0577aa66-e25a-4198-84a3-db8becccbbf6"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:41:16 crc kubenswrapper[4923]: I1128 11:41:16.072444 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lfgpg\" (UniqueName: \"kubernetes.io/projected/0577aa66-e25a-4198-84a3-db8becccbbf6-kube-api-access-lfgpg\") on node \"crc\" DevicePath \"\"" Nov 28 11:41:16 crc kubenswrapper[4923]: I1128 11:41:16.072465 4923 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0577aa66-e25a-4198-84a3-db8becccbbf6-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 11:41:16 crc kubenswrapper[4923]: I1128 11:41:16.072473 4923 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0577aa66-e25a-4198-84a3-db8becccbbf6-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 11:41:16 crc kubenswrapper[4923]: I1128 11:41:16.436511 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-c6dbl" event={"ID":"0577aa66-e25a-4198-84a3-db8becccbbf6","Type":"ContainerDied","Data":"637016b4daf1e8fdbb4fa87dc159a32d0d071d50df9f74714a8411d45897d24f"} Nov 28 11:41:16 crc kubenswrapper[4923]: I1128 11:41:16.436563 4923 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="637016b4daf1e8fdbb4fa87dc159a32d0d071d50df9f74714a8411d45897d24f" Nov 28 11:41:16 crc kubenswrapper[4923]: I1128 11:41:16.436622 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-c6dbl" Nov 28 11:42:14 crc kubenswrapper[4923]: I1128 11:42:14.029076 4923 patch_prober.go:28] interesting pod/machine-config-daemon-bwdth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 11:42:14 crc kubenswrapper[4923]: I1128 11:42:14.029879 4923 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 11:42:24 crc kubenswrapper[4923]: I1128 11:42:24.104248 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-zr9xk/must-gather-fztwk"] Nov 28 11:42:24 crc kubenswrapper[4923]: E1128 11:42:24.105053 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de618ad7-8216-4a56-b312-79e747eb9fb4" containerName="registry-server" Nov 28 11:42:24 crc kubenswrapper[4923]: I1128 11:42:24.105065 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="de618ad7-8216-4a56-b312-79e747eb9fb4" containerName="registry-server" Nov 28 11:42:24 crc kubenswrapper[4923]: E1128 11:42:24.105082 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0577aa66-e25a-4198-84a3-db8becccbbf6" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 28 11:42:24 crc kubenswrapper[4923]: I1128 11:42:24.105090 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="0577aa66-e25a-4198-84a3-db8becccbbf6" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 28 11:42:24 crc kubenswrapper[4923]: E1128 11:42:24.105108 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de618ad7-8216-4a56-b312-79e747eb9fb4" containerName="extract-utilities" Nov 28 11:42:24 crc kubenswrapper[4923]: I1128 11:42:24.105114 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="de618ad7-8216-4a56-b312-79e747eb9fb4" containerName="extract-utilities" Nov 28 11:42:24 crc kubenswrapper[4923]: E1128 11:42:24.105128 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de618ad7-8216-4a56-b312-79e747eb9fb4" containerName="extract-content" Nov 28 11:42:24 crc kubenswrapper[4923]: I1128 11:42:24.105134 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="de618ad7-8216-4a56-b312-79e747eb9fb4" containerName="extract-content" Nov 28 11:42:24 crc kubenswrapper[4923]: I1128 11:42:24.105279 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="de618ad7-8216-4a56-b312-79e747eb9fb4" containerName="registry-server" Nov 28 11:42:24 crc kubenswrapper[4923]: I1128 11:42:24.105290 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="0577aa66-e25a-4198-84a3-db8becccbbf6" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 28 11:42:24 crc kubenswrapper[4923]: I1128 11:42:24.106117 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-zr9xk/must-gather-fztwk" Nov 28 11:42:24 crc kubenswrapper[4923]: I1128 11:42:24.109498 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-zr9xk"/"kube-root-ca.crt" Nov 28 11:42:24 crc kubenswrapper[4923]: I1128 11:42:24.109697 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-zr9xk"/"default-dockercfg-lzzt4" Nov 28 11:42:24 crc kubenswrapper[4923]: I1128 11:42:24.110553 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-zr9xk"/"openshift-service-ca.crt" Nov 28 11:42:24 crc kubenswrapper[4923]: I1128 11:42:24.129797 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-zr9xk/must-gather-fztwk"] Nov 28 11:42:24 crc kubenswrapper[4923]: I1128 11:42:24.167614 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-t82c5"] Nov 28 11:42:24 crc kubenswrapper[4923]: I1128 11:42:24.176633 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-t82c5" Nov 28 11:42:24 crc kubenswrapper[4923]: I1128 11:42:24.177769 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-t82c5"] Nov 28 11:42:24 crc kubenswrapper[4923]: I1128 11:42:24.213895 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/dadea3e7-5e33-4e45-a825-ea661d9c5f81-must-gather-output\") pod \"must-gather-fztwk\" (UID: \"dadea3e7-5e33-4e45-a825-ea661d9c5f81\") " pod="openshift-must-gather-zr9xk/must-gather-fztwk" Nov 28 11:42:24 crc kubenswrapper[4923]: I1128 11:42:24.214024 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ggvc6\" (UniqueName: \"kubernetes.io/projected/dadea3e7-5e33-4e45-a825-ea661d9c5f81-kube-api-access-ggvc6\") pod \"must-gather-fztwk\" (UID: \"dadea3e7-5e33-4e45-a825-ea661d9c5f81\") " pod="openshift-must-gather-zr9xk/must-gather-fztwk" Nov 28 11:42:24 crc kubenswrapper[4923]: I1128 11:42:24.315818 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/dadea3e7-5e33-4e45-a825-ea661d9c5f81-must-gather-output\") pod \"must-gather-fztwk\" (UID: \"dadea3e7-5e33-4e45-a825-ea661d9c5f81\") " pod="openshift-must-gather-zr9xk/must-gather-fztwk" Nov 28 11:42:24 crc kubenswrapper[4923]: I1128 11:42:24.315885 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8rqz8\" (UniqueName: \"kubernetes.io/projected/5016d362-b272-42a6-a154-00660cf52894-kube-api-access-8rqz8\") pod \"community-operators-t82c5\" (UID: \"5016d362-b272-42a6-a154-00660cf52894\") " pod="openshift-marketplace/community-operators-t82c5" Nov 28 11:42:24 crc kubenswrapper[4923]: I1128 11:42:24.315972 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ggvc6\" (UniqueName: \"kubernetes.io/projected/dadea3e7-5e33-4e45-a825-ea661d9c5f81-kube-api-access-ggvc6\") pod \"must-gather-fztwk\" (UID: \"dadea3e7-5e33-4e45-a825-ea661d9c5f81\") " pod="openshift-must-gather-zr9xk/must-gather-fztwk" Nov 28 11:42:24 crc kubenswrapper[4923]: I1128 11:42:24.316017 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5016d362-b272-42a6-a154-00660cf52894-utilities\") pod \"community-operators-t82c5\" (UID: \"5016d362-b272-42a6-a154-00660cf52894\") " pod="openshift-marketplace/community-operators-t82c5" Nov 28 11:42:24 crc kubenswrapper[4923]: I1128 11:42:24.316112 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5016d362-b272-42a6-a154-00660cf52894-catalog-content\") pod \"community-operators-t82c5\" (UID: \"5016d362-b272-42a6-a154-00660cf52894\") " pod="openshift-marketplace/community-operators-t82c5" Nov 28 11:42:24 crc kubenswrapper[4923]: I1128 11:42:24.316273 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/dadea3e7-5e33-4e45-a825-ea661d9c5f81-must-gather-output\") pod \"must-gather-fztwk\" (UID: \"dadea3e7-5e33-4e45-a825-ea661d9c5f81\") " pod="openshift-must-gather-zr9xk/must-gather-fztwk" Nov 28 11:42:24 crc kubenswrapper[4923]: I1128 11:42:24.362541 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ggvc6\" (UniqueName: \"kubernetes.io/projected/dadea3e7-5e33-4e45-a825-ea661d9c5f81-kube-api-access-ggvc6\") pod \"must-gather-fztwk\" (UID: \"dadea3e7-5e33-4e45-a825-ea661d9c5f81\") " pod="openshift-must-gather-zr9xk/must-gather-fztwk" Nov 28 11:42:24 crc kubenswrapper[4923]: I1128 11:42:24.417870 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5016d362-b272-42a6-a154-00660cf52894-catalog-content\") pod \"community-operators-t82c5\" (UID: \"5016d362-b272-42a6-a154-00660cf52894\") " pod="openshift-marketplace/community-operators-t82c5" Nov 28 11:42:24 crc kubenswrapper[4923]: I1128 11:42:24.418170 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8rqz8\" (UniqueName: \"kubernetes.io/projected/5016d362-b272-42a6-a154-00660cf52894-kube-api-access-8rqz8\") pod \"community-operators-t82c5\" (UID: \"5016d362-b272-42a6-a154-00660cf52894\") " pod="openshift-marketplace/community-operators-t82c5" Nov 28 11:42:24 crc kubenswrapper[4923]: I1128 11:42:24.418235 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5016d362-b272-42a6-a154-00660cf52894-utilities\") pod \"community-operators-t82c5\" (UID: \"5016d362-b272-42a6-a154-00660cf52894\") " pod="openshift-marketplace/community-operators-t82c5" Nov 28 11:42:24 crc kubenswrapper[4923]: I1128 11:42:24.418375 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5016d362-b272-42a6-a154-00660cf52894-catalog-content\") pod \"community-operators-t82c5\" (UID: \"5016d362-b272-42a6-a154-00660cf52894\") " pod="openshift-marketplace/community-operators-t82c5" Nov 28 11:42:24 crc kubenswrapper[4923]: I1128 11:42:24.418501 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5016d362-b272-42a6-a154-00660cf52894-utilities\") pod \"community-operators-t82c5\" (UID: \"5016d362-b272-42a6-a154-00660cf52894\") " pod="openshift-marketplace/community-operators-t82c5" Nov 28 11:42:24 crc kubenswrapper[4923]: I1128 11:42:24.421223 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-zr9xk/must-gather-fztwk" Nov 28 11:42:24 crc kubenswrapper[4923]: I1128 11:42:24.434634 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8rqz8\" (UniqueName: \"kubernetes.io/projected/5016d362-b272-42a6-a154-00660cf52894-kube-api-access-8rqz8\") pod \"community-operators-t82c5\" (UID: \"5016d362-b272-42a6-a154-00660cf52894\") " pod="openshift-marketplace/community-operators-t82c5" Nov 28 11:42:24 crc kubenswrapper[4923]: I1128 11:42:24.491100 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-t82c5" Nov 28 11:42:24 crc kubenswrapper[4923]: I1128 11:42:24.976780 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-zr9xk/must-gather-fztwk"] Nov 28 11:42:25 crc kubenswrapper[4923]: I1128 11:42:25.125949 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-t82c5"] Nov 28 11:42:25 crc kubenswrapper[4923]: W1128 11:42:25.133904 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5016d362_b272_42a6_a154_00660cf52894.slice/crio-29cbb8b4ee4f6ce4ae290c444b1ffbfa2d2df50e9d9a2b3e454064bc9b219fe3 WatchSource:0}: Error finding container 29cbb8b4ee4f6ce4ae290c444b1ffbfa2d2df50e9d9a2b3e454064bc9b219fe3: Status 404 returned error can't find the container with id 29cbb8b4ee4f6ce4ae290c444b1ffbfa2d2df50e9d9a2b3e454064bc9b219fe3 Nov 28 11:42:25 crc kubenswrapper[4923]: I1128 11:42:25.192060 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-t82c5" event={"ID":"5016d362-b272-42a6-a154-00660cf52894","Type":"ContainerStarted","Data":"29cbb8b4ee4f6ce4ae290c444b1ffbfa2d2df50e9d9a2b3e454064bc9b219fe3"} Nov 28 11:42:25 crc kubenswrapper[4923]: I1128 11:42:25.192098 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-zr9xk/must-gather-fztwk" event={"ID":"dadea3e7-5e33-4e45-a825-ea661d9c5f81","Type":"ContainerStarted","Data":"704a03112ddf546d7a251de90276eb8586d640646ad0599f1ef9ca0d95f1e5b5"} Nov 28 11:42:26 crc kubenswrapper[4923]: I1128 11:42:26.186704 4923 generic.go:334] "Generic (PLEG): container finished" podID="5016d362-b272-42a6-a154-00660cf52894" containerID="34021a528401d7c69a86edff2397458a418f69416d608b8f473307d8181f9a0c" exitCode=0 Nov 28 11:42:26 crc kubenswrapper[4923]: I1128 11:42:26.186815 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-t82c5" event={"ID":"5016d362-b272-42a6-a154-00660cf52894","Type":"ContainerDied","Data":"34021a528401d7c69a86edff2397458a418f69416d608b8f473307d8181f9a0c"} Nov 28 11:42:28 crc kubenswrapper[4923]: I1128 11:42:28.208366 4923 generic.go:334] "Generic (PLEG): container finished" podID="5016d362-b272-42a6-a154-00660cf52894" containerID="60491a610b0480a1129335e9ac20a40aa116537af7f9a7815e23656b559f16b5" exitCode=0 Nov 28 11:42:28 crc kubenswrapper[4923]: I1128 11:42:28.208519 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-t82c5" event={"ID":"5016d362-b272-42a6-a154-00660cf52894","Type":"ContainerDied","Data":"60491a610b0480a1129335e9ac20a40aa116537af7f9a7815e23656b559f16b5"} Nov 28 11:42:35 crc kubenswrapper[4923]: I1128 11:42:35.298369 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-t82c5" 
event={"ID":"5016d362-b272-42a6-a154-00660cf52894","Type":"ContainerStarted","Data":"45259be3d12106b06db24275644b888070e8472130fe0966b39b2ed624415c48"} Nov 28 11:42:35 crc kubenswrapper[4923]: I1128 11:42:35.308972 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-zr9xk/must-gather-fztwk" event={"ID":"dadea3e7-5e33-4e45-a825-ea661d9c5f81","Type":"ContainerStarted","Data":"5893b5d45d617f66677d4c64363d1726b36f265567e37e6ddfd0fdf70d9ceb6a"} Nov 28 11:42:35 crc kubenswrapper[4923]: I1128 11:42:35.309024 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-zr9xk/must-gather-fztwk" event={"ID":"dadea3e7-5e33-4e45-a825-ea661d9c5f81","Type":"ContainerStarted","Data":"3c9ef6c67fd0302b0c7b56293f83f828cd181aeb1421582653bd50ed2787e563"} Nov 28 11:42:35 crc kubenswrapper[4923]: I1128 11:42:35.322124 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-t82c5" podStartSLOduration=3.343919536 podStartE2EDuration="11.322052535s" podCreationTimestamp="2025-11-28 11:42:24 +0000 UTC" firstStartedPulling="2025-11-28 11:42:26.188614003 +0000 UTC m=+2025.317298213" lastFinishedPulling="2025-11-28 11:42:34.166746972 +0000 UTC m=+2033.295431212" observedRunningTime="2025-11-28 11:42:35.320650345 +0000 UTC m=+2034.449334595" watchObservedRunningTime="2025-11-28 11:42:35.322052535 +0000 UTC m=+2034.450736735" Nov 28 11:42:35 crc kubenswrapper[4923]: I1128 11:42:35.341083 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-zr9xk/must-gather-fztwk" podStartSLOduration=2.130966136 podStartE2EDuration="11.341065384s" podCreationTimestamp="2025-11-28 11:42:24 +0000 UTC" firstStartedPulling="2025-11-28 11:42:24.993989774 +0000 UTC m=+2024.122673984" lastFinishedPulling="2025-11-28 11:42:34.204089012 +0000 UTC m=+2033.332773232" observedRunningTime="2025-11-28 11:42:35.337178654 +0000 UTC m=+2034.465862884" watchObservedRunningTime="2025-11-28 11:42:35.341065384 +0000 UTC m=+2034.469749594" Nov 28 11:42:38 crc kubenswrapper[4923]: I1128 11:42:38.945667 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-zr9xk/crc-debug-447r6"] Nov 28 11:42:38 crc kubenswrapper[4923]: I1128 11:42:38.947148 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-zr9xk/crc-debug-447r6" Nov 28 11:42:39 crc kubenswrapper[4923]: I1128 11:42:39.105325 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/9d025c3d-0ca6-4f24-8a3c-63bbaef678ad-host\") pod \"crc-debug-447r6\" (UID: \"9d025c3d-0ca6-4f24-8a3c-63bbaef678ad\") " pod="openshift-must-gather-zr9xk/crc-debug-447r6" Nov 28 11:42:39 crc kubenswrapper[4923]: I1128 11:42:39.105444 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-njrrm\" (UniqueName: \"kubernetes.io/projected/9d025c3d-0ca6-4f24-8a3c-63bbaef678ad-kube-api-access-njrrm\") pod \"crc-debug-447r6\" (UID: \"9d025c3d-0ca6-4f24-8a3c-63bbaef678ad\") " pod="openshift-must-gather-zr9xk/crc-debug-447r6" Nov 28 11:42:39 crc kubenswrapper[4923]: I1128 11:42:39.206923 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/9d025c3d-0ca6-4f24-8a3c-63bbaef678ad-host\") pod \"crc-debug-447r6\" (UID: \"9d025c3d-0ca6-4f24-8a3c-63bbaef678ad\") " pod="openshift-must-gather-zr9xk/crc-debug-447r6" Nov 28 11:42:39 crc kubenswrapper[4923]: I1128 11:42:39.207081 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/9d025c3d-0ca6-4f24-8a3c-63bbaef678ad-host\") pod \"crc-debug-447r6\" (UID: \"9d025c3d-0ca6-4f24-8a3c-63bbaef678ad\") " pod="openshift-must-gather-zr9xk/crc-debug-447r6" Nov 28 11:42:39 crc kubenswrapper[4923]: I1128 11:42:39.207093 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-njrrm\" (UniqueName: \"kubernetes.io/projected/9d025c3d-0ca6-4f24-8a3c-63bbaef678ad-kube-api-access-njrrm\") pod \"crc-debug-447r6\" (UID: \"9d025c3d-0ca6-4f24-8a3c-63bbaef678ad\") " pod="openshift-must-gather-zr9xk/crc-debug-447r6" Nov 28 11:42:39 crc kubenswrapper[4923]: I1128 11:42:39.231655 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-njrrm\" (UniqueName: \"kubernetes.io/projected/9d025c3d-0ca6-4f24-8a3c-63bbaef678ad-kube-api-access-njrrm\") pod \"crc-debug-447r6\" (UID: \"9d025c3d-0ca6-4f24-8a3c-63bbaef678ad\") " pod="openshift-must-gather-zr9xk/crc-debug-447r6" Nov 28 11:42:39 crc kubenswrapper[4923]: I1128 11:42:39.263035 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-zr9xk/crc-debug-447r6" Nov 28 11:42:39 crc kubenswrapper[4923]: I1128 11:42:39.340607 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-zr9xk/crc-debug-447r6" event={"ID":"9d025c3d-0ca6-4f24-8a3c-63bbaef678ad","Type":"ContainerStarted","Data":"85ed5b81eb44ba6a3348457af11f1688b698b204ce5f41e4ddd912e0faa5e51b"} Nov 28 11:42:44 crc kubenswrapper[4923]: I1128 11:42:44.025794 4923 patch_prober.go:28] interesting pod/machine-config-daemon-bwdth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 11:42:44 crc kubenswrapper[4923]: I1128 11:42:44.026253 4923 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 11:42:44 crc kubenswrapper[4923]: I1128 11:42:44.491772 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-t82c5" Nov 28 11:42:44 crc kubenswrapper[4923]: I1128 11:42:44.491829 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-t82c5" Nov 28 11:42:44 crc kubenswrapper[4923]: I1128 11:42:44.563107 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-t82c5" Nov 28 11:42:45 crc kubenswrapper[4923]: I1128 11:42:45.458193 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-t82c5" Nov 28 11:42:45 crc kubenswrapper[4923]: I1128 11:42:45.511374 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-t82c5"] Nov 28 11:42:47 crc kubenswrapper[4923]: I1128 11:42:47.417896 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-t82c5" podUID="5016d362-b272-42a6-a154-00660cf52894" containerName="registry-server" containerID="cri-o://45259be3d12106b06db24275644b888070e8472130fe0966b39b2ed624415c48" gracePeriod=2 Nov 28 11:42:48 crc kubenswrapper[4923]: I1128 11:42:48.426894 4923 generic.go:334] "Generic (PLEG): container finished" podID="5016d362-b272-42a6-a154-00660cf52894" containerID="45259be3d12106b06db24275644b888070e8472130fe0966b39b2ed624415c48" exitCode=0 Nov 28 11:42:48 crc kubenswrapper[4923]: I1128 11:42:48.427224 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-t82c5" event={"ID":"5016d362-b272-42a6-a154-00660cf52894","Type":"ContainerDied","Data":"45259be3d12106b06db24275644b888070e8472130fe0966b39b2ed624415c48"} Nov 28 11:42:51 crc kubenswrapper[4923]: I1128 11:42:51.072135 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-t82c5" Nov 28 11:42:51 crc kubenswrapper[4923]: I1128 11:42:51.257622 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8rqz8\" (UniqueName: \"kubernetes.io/projected/5016d362-b272-42a6-a154-00660cf52894-kube-api-access-8rqz8\") pod \"5016d362-b272-42a6-a154-00660cf52894\" (UID: \"5016d362-b272-42a6-a154-00660cf52894\") " Nov 28 11:42:51 crc kubenswrapper[4923]: I1128 11:42:51.258331 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5016d362-b272-42a6-a154-00660cf52894-utilities\") pod \"5016d362-b272-42a6-a154-00660cf52894\" (UID: \"5016d362-b272-42a6-a154-00660cf52894\") " Nov 28 11:42:51 crc kubenswrapper[4923]: I1128 11:42:51.258442 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5016d362-b272-42a6-a154-00660cf52894-catalog-content\") pod \"5016d362-b272-42a6-a154-00660cf52894\" (UID: \"5016d362-b272-42a6-a154-00660cf52894\") " Nov 28 11:42:51 crc kubenswrapper[4923]: I1128 11:42:51.258889 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5016d362-b272-42a6-a154-00660cf52894-utilities" (OuterVolumeSpecName: "utilities") pod "5016d362-b272-42a6-a154-00660cf52894" (UID: "5016d362-b272-42a6-a154-00660cf52894"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:42:51 crc kubenswrapper[4923]: I1128 11:42:51.264537 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5016d362-b272-42a6-a154-00660cf52894-kube-api-access-8rqz8" (OuterVolumeSpecName: "kube-api-access-8rqz8") pod "5016d362-b272-42a6-a154-00660cf52894" (UID: "5016d362-b272-42a6-a154-00660cf52894"). InnerVolumeSpecName "kube-api-access-8rqz8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:42:51 crc kubenswrapper[4923]: I1128 11:42:51.307167 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5016d362-b272-42a6-a154-00660cf52894-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5016d362-b272-42a6-a154-00660cf52894" (UID: "5016d362-b272-42a6-a154-00660cf52894"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:42:51 crc kubenswrapper[4923]: I1128 11:42:51.360638 4923 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5016d362-b272-42a6-a154-00660cf52894-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 11:42:51 crc kubenswrapper[4923]: I1128 11:42:51.360673 4923 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5016d362-b272-42a6-a154-00660cf52894-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 11:42:51 crc kubenswrapper[4923]: I1128 11:42:51.360685 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8rqz8\" (UniqueName: \"kubernetes.io/projected/5016d362-b272-42a6-a154-00660cf52894-kube-api-access-8rqz8\") on node \"crc\" DevicePath \"\"" Nov 28 11:42:51 crc kubenswrapper[4923]: I1128 11:42:51.450454 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-zr9xk/crc-debug-447r6" event={"ID":"9d025c3d-0ca6-4f24-8a3c-63bbaef678ad","Type":"ContainerStarted","Data":"dd28ef2f6b1c1aea196b35441b0b3d842836059357d2b89923c6c27cb3c78a77"} Nov 28 11:42:51 crc kubenswrapper[4923]: I1128 11:42:51.453455 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-t82c5" event={"ID":"5016d362-b272-42a6-a154-00660cf52894","Type":"ContainerDied","Data":"29cbb8b4ee4f6ce4ae290c444b1ffbfa2d2df50e9d9a2b3e454064bc9b219fe3"} Nov 28 11:42:51 crc kubenswrapper[4923]: I1128 11:42:51.453489 4923 scope.go:117] "RemoveContainer" containerID="45259be3d12106b06db24275644b888070e8472130fe0966b39b2ed624415c48" Nov 28 11:42:51 crc kubenswrapper[4923]: I1128 11:42:51.453578 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-t82c5" Nov 28 11:42:51 crc kubenswrapper[4923]: I1128 11:42:51.481562 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-zr9xk/crc-debug-447r6" podStartSLOduration=2.017730699 podStartE2EDuration="13.481526058s" podCreationTimestamp="2025-11-28 11:42:38 +0000 UTC" firstStartedPulling="2025-11-28 11:42:39.293383481 +0000 UTC m=+2038.422067691" lastFinishedPulling="2025-11-28 11:42:50.75717884 +0000 UTC m=+2049.885863050" observedRunningTime="2025-11-28 11:42:51.474034476 +0000 UTC m=+2050.602718686" watchObservedRunningTime="2025-11-28 11:42:51.481526058 +0000 UTC m=+2050.610210258" Nov 28 11:42:51 crc kubenswrapper[4923]: I1128 11:42:51.483184 4923 scope.go:117] "RemoveContainer" containerID="60491a610b0480a1129335e9ac20a40aa116537af7f9a7815e23656b559f16b5" Nov 28 11:42:51 crc kubenswrapper[4923]: I1128 11:42:51.517613 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-t82c5"] Nov 28 11:42:51 crc kubenswrapper[4923]: I1128 11:42:51.522969 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-t82c5"] Nov 28 11:42:51 crc kubenswrapper[4923]: I1128 11:42:51.536093 4923 scope.go:117] "RemoveContainer" containerID="34021a528401d7c69a86edff2397458a418f69416d608b8f473307d8181f9a0c" Nov 28 11:42:53 crc kubenswrapper[4923]: I1128 11:42:53.180097 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5016d362-b272-42a6-a154-00660cf52894" path="/var/lib/kubelet/pods/5016d362-b272-42a6-a154-00660cf52894/volumes" Nov 28 11:43:08 crc kubenswrapper[4923]: I1128 11:43:08.621786 4923 generic.go:334] "Generic (PLEG): container finished" podID="9d025c3d-0ca6-4f24-8a3c-63bbaef678ad" containerID="dd28ef2f6b1c1aea196b35441b0b3d842836059357d2b89923c6c27cb3c78a77" exitCode=0 Nov 28 11:43:08 crc kubenswrapper[4923]: I1128 11:43:08.621884 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-zr9xk/crc-debug-447r6" event={"ID":"9d025c3d-0ca6-4f24-8a3c-63bbaef678ad","Type":"ContainerDied","Data":"dd28ef2f6b1c1aea196b35441b0b3d842836059357d2b89923c6c27cb3c78a77"} Nov 28 11:43:09 crc kubenswrapper[4923]: I1128 11:43:09.740742 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-zr9xk/crc-debug-447r6" Nov 28 11:43:09 crc kubenswrapper[4923]: I1128 11:43:09.770079 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-zr9xk/crc-debug-447r6"] Nov 28 11:43:09 crc kubenswrapper[4923]: I1128 11:43:09.778440 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-zr9xk/crc-debug-447r6"] Nov 28 11:43:09 crc kubenswrapper[4923]: I1128 11:43:09.876016 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-njrrm\" (UniqueName: \"kubernetes.io/projected/9d025c3d-0ca6-4f24-8a3c-63bbaef678ad-kube-api-access-njrrm\") pod \"9d025c3d-0ca6-4f24-8a3c-63bbaef678ad\" (UID: \"9d025c3d-0ca6-4f24-8a3c-63bbaef678ad\") " Nov 28 11:43:09 crc kubenswrapper[4923]: I1128 11:43:09.876067 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/9d025c3d-0ca6-4f24-8a3c-63bbaef678ad-host\") pod \"9d025c3d-0ca6-4f24-8a3c-63bbaef678ad\" (UID: \"9d025c3d-0ca6-4f24-8a3c-63bbaef678ad\") " Nov 28 11:43:09 crc kubenswrapper[4923]: I1128 11:43:09.876241 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9d025c3d-0ca6-4f24-8a3c-63bbaef678ad-host" (OuterVolumeSpecName: "host") pod "9d025c3d-0ca6-4f24-8a3c-63bbaef678ad" (UID: "9d025c3d-0ca6-4f24-8a3c-63bbaef678ad"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 11:43:09 crc kubenswrapper[4923]: I1128 11:43:09.876523 4923 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/9d025c3d-0ca6-4f24-8a3c-63bbaef678ad-host\") on node \"crc\" DevicePath \"\"" Nov 28 11:43:09 crc kubenswrapper[4923]: I1128 11:43:09.881264 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d025c3d-0ca6-4f24-8a3c-63bbaef678ad-kube-api-access-njrrm" (OuterVolumeSpecName: "kube-api-access-njrrm") pod "9d025c3d-0ca6-4f24-8a3c-63bbaef678ad" (UID: "9d025c3d-0ca6-4f24-8a3c-63bbaef678ad"). InnerVolumeSpecName "kube-api-access-njrrm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:43:09 crc kubenswrapper[4923]: I1128 11:43:09.977487 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-njrrm\" (UniqueName: \"kubernetes.io/projected/9d025c3d-0ca6-4f24-8a3c-63bbaef678ad-kube-api-access-njrrm\") on node \"crc\" DevicePath \"\"" Nov 28 11:43:10 crc kubenswrapper[4923]: I1128 11:43:10.644144 4923 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="85ed5b81eb44ba6a3348457af11f1688b698b204ce5f41e4ddd912e0faa5e51b" Nov 28 11:43:10 crc kubenswrapper[4923]: I1128 11:43:10.644212 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-zr9xk/crc-debug-447r6" Nov 28 11:43:10 crc kubenswrapper[4923]: I1128 11:43:10.976196 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-zr9xk/crc-debug-jzfxc"] Nov 28 11:43:10 crc kubenswrapper[4923]: E1128 11:43:10.976507 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5016d362-b272-42a6-a154-00660cf52894" containerName="registry-server" Nov 28 11:43:10 crc kubenswrapper[4923]: I1128 11:43:10.976519 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="5016d362-b272-42a6-a154-00660cf52894" containerName="registry-server" Nov 28 11:43:10 crc kubenswrapper[4923]: E1128 11:43:10.976537 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5016d362-b272-42a6-a154-00660cf52894" containerName="extract-content" Nov 28 11:43:10 crc kubenswrapper[4923]: I1128 11:43:10.976543 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="5016d362-b272-42a6-a154-00660cf52894" containerName="extract-content" Nov 28 11:43:10 crc kubenswrapper[4923]: E1128 11:43:10.976555 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5016d362-b272-42a6-a154-00660cf52894" containerName="extract-utilities" Nov 28 11:43:10 crc kubenswrapper[4923]: I1128 11:43:10.976560 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="5016d362-b272-42a6-a154-00660cf52894" containerName="extract-utilities" Nov 28 11:43:10 crc kubenswrapper[4923]: E1128 11:43:10.976583 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9d025c3d-0ca6-4f24-8a3c-63bbaef678ad" containerName="container-00" Nov 28 11:43:10 crc kubenswrapper[4923]: I1128 11:43:10.976588 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d025c3d-0ca6-4f24-8a3c-63bbaef678ad" containerName="container-00" Nov 28 11:43:10 crc kubenswrapper[4923]: I1128 11:43:10.976738 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="9d025c3d-0ca6-4f24-8a3c-63bbaef678ad" containerName="container-00" Nov 28 11:43:10 crc kubenswrapper[4923]: I1128 11:43:10.976752 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="5016d362-b272-42a6-a154-00660cf52894" containerName="registry-server" Nov 28 11:43:10 crc kubenswrapper[4923]: I1128 11:43:10.977277 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-zr9xk/crc-debug-jzfxc" Nov 28 11:43:11 crc kubenswrapper[4923]: I1128 11:43:11.096071 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-llnrj\" (UniqueName: \"kubernetes.io/projected/53cfe1c3-4b9a-43b1-8266-c8a6f23690d3-kube-api-access-llnrj\") pod \"crc-debug-jzfxc\" (UID: \"53cfe1c3-4b9a-43b1-8266-c8a6f23690d3\") " pod="openshift-must-gather-zr9xk/crc-debug-jzfxc" Nov 28 11:43:11 crc kubenswrapper[4923]: I1128 11:43:11.096140 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/53cfe1c3-4b9a-43b1-8266-c8a6f23690d3-host\") pod \"crc-debug-jzfxc\" (UID: \"53cfe1c3-4b9a-43b1-8266-c8a6f23690d3\") " pod="openshift-must-gather-zr9xk/crc-debug-jzfxc" Nov 28 11:43:11 crc kubenswrapper[4923]: I1128 11:43:11.179823 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d025c3d-0ca6-4f24-8a3c-63bbaef678ad" path="/var/lib/kubelet/pods/9d025c3d-0ca6-4f24-8a3c-63bbaef678ad/volumes" Nov 28 11:43:11 crc kubenswrapper[4923]: I1128 11:43:11.197882 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-llnrj\" (UniqueName: \"kubernetes.io/projected/53cfe1c3-4b9a-43b1-8266-c8a6f23690d3-kube-api-access-llnrj\") pod \"crc-debug-jzfxc\" (UID: \"53cfe1c3-4b9a-43b1-8266-c8a6f23690d3\") " pod="openshift-must-gather-zr9xk/crc-debug-jzfxc" Nov 28 11:43:11 crc kubenswrapper[4923]: I1128 11:43:11.198048 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/53cfe1c3-4b9a-43b1-8266-c8a6f23690d3-host\") pod \"crc-debug-jzfxc\" (UID: \"53cfe1c3-4b9a-43b1-8266-c8a6f23690d3\") " pod="openshift-must-gather-zr9xk/crc-debug-jzfxc" Nov 28 11:43:11 crc kubenswrapper[4923]: I1128 11:43:11.198177 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/53cfe1c3-4b9a-43b1-8266-c8a6f23690d3-host\") pod \"crc-debug-jzfxc\" (UID: \"53cfe1c3-4b9a-43b1-8266-c8a6f23690d3\") " pod="openshift-must-gather-zr9xk/crc-debug-jzfxc" Nov 28 11:43:11 crc kubenswrapper[4923]: I1128 11:43:11.219584 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-llnrj\" (UniqueName: \"kubernetes.io/projected/53cfe1c3-4b9a-43b1-8266-c8a6f23690d3-kube-api-access-llnrj\") pod \"crc-debug-jzfxc\" (UID: \"53cfe1c3-4b9a-43b1-8266-c8a6f23690d3\") " pod="openshift-must-gather-zr9xk/crc-debug-jzfxc" Nov 28 11:43:11 crc kubenswrapper[4923]: I1128 11:43:11.291328 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-zr9xk/crc-debug-jzfxc" Nov 28 11:43:11 crc kubenswrapper[4923]: I1128 11:43:11.652720 4923 generic.go:334] "Generic (PLEG): container finished" podID="53cfe1c3-4b9a-43b1-8266-c8a6f23690d3" containerID="c7133027b0590c8b05e6467399a598ce9d6aa285169e9c3c838d127758f1345a" exitCode=1 Nov 28 11:43:11 crc kubenswrapper[4923]: I1128 11:43:11.652992 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-zr9xk/crc-debug-jzfxc" event={"ID":"53cfe1c3-4b9a-43b1-8266-c8a6f23690d3","Type":"ContainerDied","Data":"c7133027b0590c8b05e6467399a598ce9d6aa285169e9c3c838d127758f1345a"} Nov 28 11:43:11 crc kubenswrapper[4923]: I1128 11:43:11.653022 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-zr9xk/crc-debug-jzfxc" event={"ID":"53cfe1c3-4b9a-43b1-8266-c8a6f23690d3","Type":"ContainerStarted","Data":"a28a9949b4f84546dddeb9757d3864b1dbf17644ea111fc73ba11ba5b26f0c07"} Nov 28 11:43:11 crc kubenswrapper[4923]: I1128 11:43:11.686599 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-zr9xk/crc-debug-jzfxc"] Nov 28 11:43:11 crc kubenswrapper[4923]: I1128 11:43:11.695986 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-zr9xk/crc-debug-jzfxc"] Nov 28 11:43:12 crc kubenswrapper[4923]: I1128 11:43:12.764186 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-zr9xk/crc-debug-jzfxc" Nov 28 11:43:12 crc kubenswrapper[4923]: I1128 11:43:12.941751 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/53cfe1c3-4b9a-43b1-8266-c8a6f23690d3-host\") pod \"53cfe1c3-4b9a-43b1-8266-c8a6f23690d3\" (UID: \"53cfe1c3-4b9a-43b1-8266-c8a6f23690d3\") " Nov 28 11:43:12 crc kubenswrapper[4923]: I1128 11:43:12.941892 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-llnrj\" (UniqueName: \"kubernetes.io/projected/53cfe1c3-4b9a-43b1-8266-c8a6f23690d3-kube-api-access-llnrj\") pod \"53cfe1c3-4b9a-43b1-8266-c8a6f23690d3\" (UID: \"53cfe1c3-4b9a-43b1-8266-c8a6f23690d3\") " Nov 28 11:43:12 crc kubenswrapper[4923]: I1128 11:43:12.941813 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/53cfe1c3-4b9a-43b1-8266-c8a6f23690d3-host" (OuterVolumeSpecName: "host") pod "53cfe1c3-4b9a-43b1-8266-c8a6f23690d3" (UID: "53cfe1c3-4b9a-43b1-8266-c8a6f23690d3"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 11:43:12 crc kubenswrapper[4923]: I1128 11:43:12.956984 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/53cfe1c3-4b9a-43b1-8266-c8a6f23690d3-kube-api-access-llnrj" (OuterVolumeSpecName: "kube-api-access-llnrj") pod "53cfe1c3-4b9a-43b1-8266-c8a6f23690d3" (UID: "53cfe1c3-4b9a-43b1-8266-c8a6f23690d3"). InnerVolumeSpecName "kube-api-access-llnrj". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:43:13 crc kubenswrapper[4923]: I1128 11:43:13.043573 4923 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/53cfe1c3-4b9a-43b1-8266-c8a6f23690d3-host\") on node \"crc\" DevicePath \"\"" Nov 28 11:43:13 crc kubenswrapper[4923]: I1128 11:43:13.043616 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-llnrj\" (UniqueName: \"kubernetes.io/projected/53cfe1c3-4b9a-43b1-8266-c8a6f23690d3-kube-api-access-llnrj\") on node \"crc\" DevicePath \"\"" Nov 28 11:43:13 crc kubenswrapper[4923]: I1128 11:43:13.180674 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="53cfe1c3-4b9a-43b1-8266-c8a6f23690d3" path="/var/lib/kubelet/pods/53cfe1c3-4b9a-43b1-8266-c8a6f23690d3/volumes" Nov 28 11:43:13 crc kubenswrapper[4923]: I1128 11:43:13.673919 4923 scope.go:117] "RemoveContainer" containerID="c7133027b0590c8b05e6467399a598ce9d6aa285169e9c3c838d127758f1345a" Nov 28 11:43:13 crc kubenswrapper[4923]: I1128 11:43:13.674011 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-zr9xk/crc-debug-jzfxc" Nov 28 11:43:14 crc kubenswrapper[4923]: I1128 11:43:14.026311 4923 patch_prober.go:28] interesting pod/machine-config-daemon-bwdth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 11:43:14 crc kubenswrapper[4923]: I1128 11:43:14.026633 4923 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 11:43:14 crc kubenswrapper[4923]: I1128 11:43:14.026670 4923 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" Nov 28 11:43:14 crc kubenswrapper[4923]: I1128 11:43:14.027325 4923 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"201e214c150f7a94a55f7f14ac88bad5c3f58ddde6dc1868cf78309362438d26"} pod="openshift-machine-config-operator/machine-config-daemon-bwdth" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 11:43:14 crc kubenswrapper[4923]: I1128 11:43:14.027378 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" containerName="machine-config-daemon" containerID="cri-o://201e214c150f7a94a55f7f14ac88bad5c3f58ddde6dc1868cf78309362438d26" gracePeriod=600 Nov 28 11:43:14 crc kubenswrapper[4923]: I1128 11:43:14.685892 4923 generic.go:334] "Generic (PLEG): container finished" podID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" containerID="201e214c150f7a94a55f7f14ac88bad5c3f58ddde6dc1868cf78309362438d26" exitCode=0 Nov 28 11:43:14 crc kubenswrapper[4923]: I1128 11:43:14.685976 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" 
event={"ID":"092566f7-fc7d-4897-a1f2-4ecedcd3058e","Type":"ContainerDied","Data":"201e214c150f7a94a55f7f14ac88bad5c3f58ddde6dc1868cf78309362438d26"} Nov 28 11:43:14 crc kubenswrapper[4923]: I1128 11:43:14.686231 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" event={"ID":"092566f7-fc7d-4897-a1f2-4ecedcd3058e","Type":"ContainerStarted","Data":"1c6d35ad730c5c4a031f298c6c53287617cc0cafbb1e8f3d75d9cfe5caf57a49"} Nov 28 11:43:14 crc kubenswrapper[4923]: I1128 11:43:14.686252 4923 scope.go:117] "RemoveContainer" containerID="59e9391c4a472ec90ba5872638acc6cc579bc7ad3d795096b3c915356fd4186a" Nov 28 11:44:00 crc kubenswrapper[4923]: I1128 11:44:00.879402 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-79797f8fb4-8wx8x_65fad5c3-977a-4ca4-924a-41cac6143073/barbican-api/0.log" Nov 28 11:44:00 crc kubenswrapper[4923]: I1128 11:44:00.959901 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-79797f8fb4-8wx8x_65fad5c3-977a-4ca4-924a-41cac6143073/barbican-api-log/0.log" Nov 28 11:44:01 crc kubenswrapper[4923]: I1128 11:44:01.080039 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-58696cc594-f6zjf_7ffee751-8b25-4777-8623-ced7082ca426/barbican-keystone-listener/0.log" Nov 28 11:44:01 crc kubenswrapper[4923]: I1128 11:44:01.144555 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-58696cc594-f6zjf_7ffee751-8b25-4777-8623-ced7082ca426/barbican-keystone-listener-log/0.log" Nov 28 11:44:01 crc kubenswrapper[4923]: I1128 11:44:01.275226 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-fc7d79659-44chg_a5b68211-f537-41d7-9b3d-859764f26575/barbican-worker/0.log" Nov 28 11:44:01 crc kubenswrapper[4923]: I1128 11:44:01.295311 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-fc7d79659-44chg_a5b68211-f537-41d7-9b3d-859764f26575/barbican-worker-log/0.log" Nov 28 11:44:01 crc kubenswrapper[4923]: I1128 11:44:01.497021 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_bootstrap-edpm-deployment-openstack-edpm-ipam-pnzvk_ff94fcb0-8bac-4b68-b732-c20bd131c50f/bootstrap-edpm-deployment-openstack-edpm-ipam/0.log" Nov 28 11:44:01 crc kubenswrapper[4923]: I1128 11:44:01.585099 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_70301603-f005-4e2f-90c9-6daedf3d09a9/ceilometer-central-agent/0.log" Nov 28 11:44:01 crc kubenswrapper[4923]: I1128 11:44:01.672618 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_70301603-f005-4e2f-90c9-6daedf3d09a9/ceilometer-notification-agent/0.log" Nov 28 11:44:01 crc kubenswrapper[4923]: I1128 11:44:01.716002 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_70301603-f005-4e2f-90c9-6daedf3d09a9/proxy-httpd/0.log" Nov 28 11:44:01 crc kubenswrapper[4923]: I1128 11:44:01.842554 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_70301603-f005-4e2f-90c9-6daedf3d09a9/sg-core/0.log" Nov 28 11:44:01 crc kubenswrapper[4923]: I1128 11:44:01.919872 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xnbvj_dc2f570f-2c91-4271-9d89-e10d5e0fe601/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam/0.log" Nov 28 11:44:02 crc kubenswrapper[4923]: I1128 
11:44:02.042171 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_9add781d-9890-4483-b021-0619d4286428/cinder-api/0.log" Nov 28 11:44:02 crc kubenswrapper[4923]: I1128 11:44:02.072385 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_9add781d-9890-4483-b021-0619d4286428/cinder-api-log/0.log" Nov 28 11:44:02 crc kubenswrapper[4923]: I1128 11:44:02.245890 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_7de6eb3a-4c44-4ac8-b61e-f14bc1a3cfe6/cinder-scheduler/0.log" Nov 28 11:44:02 crc kubenswrapper[4923]: I1128 11:44:02.254183 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_7de6eb3a-4c44-4ac8-b61e-f14bc1a3cfe6/probe/0.log" Nov 28 11:44:02 crc kubenswrapper[4923]: I1128 11:44:02.433762 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-network-edpm-deployment-openstack-edpm-ipam-lxjfl_3063c09c-4f2e-4fdb-b9bc-302e69185203/configure-network-edpm-deployment-openstack-edpm-ipam/0.log" Nov 28 11:44:02 crc kubenswrapper[4923]: I1128 11:44:02.560170 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-os-edpm-deployment-openstack-edpm-ipam-nf2p8_7e8e0a19-95c2-4a0a-8847-e7e2fdf016d3/configure-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 28 11:44:02 crc kubenswrapper[4923]: I1128 11:44:02.734805 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-79794c8ddf-8zvpb_a3965665-ea65-453f-8139-d611cbbc3833/init/0.log" Nov 28 11:44:02 crc kubenswrapper[4923]: I1128 11:44:02.870827 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-79794c8ddf-8zvpb_a3965665-ea65-453f-8139-d611cbbc3833/init/0.log" Nov 28 11:44:02 crc kubenswrapper[4923]: I1128 11:44:02.920789 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-79794c8ddf-8zvpb_a3965665-ea65-453f-8139-d611cbbc3833/dnsmasq-dns/0.log" Nov 28 11:44:02 crc kubenswrapper[4923]: I1128 11:44:02.984926 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-os-edpm-deployment-openstack-edpm-ipam-snpf8_5e780070-fef2-46e6-9c83-029164d61a1d/install-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 28 11:44:03 crc kubenswrapper[4923]: I1128 11:44:03.199212 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-698b96f5d-97vsv_802efe15-184f-4d4b-821f-1e55c9fe5ace/keystone-api/0.log" Nov 28 11:44:03 crc kubenswrapper[4923]: I1128 11:44:03.216663 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_f4883bd9-80ed-4500-806d-d1c2a04ebbbd/kube-state-metrics/0.log" Nov 28 11:44:03 crc kubenswrapper[4923]: I1128 11:44:03.433811 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-5fbc45745-sfgcl_71e4dc03-02ac-4443-ad2d-9d47f3f1457b/neutron-api/0.log" Nov 28 11:44:03 crc kubenswrapper[4923]: I1128 11:44:03.720390 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-5fbc45745-sfgcl_71e4dc03-02ac-4443-ad2d-9d47f3f1457b/neutron-httpd/0.log" Nov 28 11:44:03 crc kubenswrapper[4923]: I1128 11:44:03.914326 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_89a1a42c-f7a9-448a-87aa-8ea85a021f46/nova-api-api/0.log" Nov 28 11:44:04 crc kubenswrapper[4923]: I1128 11:44:04.050974 4923 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_nova-api-0_89a1a42c-f7a9-448a-87aa-8ea85a021f46/nova-api-log/0.log" Nov 28 11:44:04 crc kubenswrapper[4923]: I1128 11:44:04.332283 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-conductor-0_bfece337-1342-4f01-b441-23dc879cb54d/nova-cell0-conductor-conductor/0.log" Nov 28 11:44:04 crc kubenswrapper[4923]: I1128 11:44:04.454545 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-conductor-0_fa5948ea-11c1-4107-b068-e15ba465c8c6/nova-cell1-conductor-conductor/0.log" Nov 28 11:44:04 crc kubenswrapper[4923]: I1128 11:44:04.646139 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-novncproxy-0_63d46d86-5ddc-4a18-a13a-a49cb248237a/nova-cell1-novncproxy-novncproxy/0.log" Nov 28 11:44:05 crc kubenswrapper[4923]: I1128 11:44:05.236362 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_8128758e-04b7-4c2c-abdc-c8c024262381/nova-metadata-log/0.log" Nov 28 11:44:05 crc kubenswrapper[4923]: I1128 11:44:05.428480 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-scheduler-0_9ae9e9bf-102d-4abe-8adc-6720d53d5ebf/nova-scheduler-scheduler/0.log" Nov 28 11:44:05 crc kubenswrapper[4923]: I1128 11:44:05.457704 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_8128758e-04b7-4c2c-abdc-c8c024262381/nova-metadata-metadata/0.log" Nov 28 11:44:05 crc kubenswrapper[4923]: I1128 11:44:05.565687 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_1bddc188-1e43-4efd-9228-ac466ce69994/mysql-bootstrap/0.log" Nov 28 11:44:05 crc kubenswrapper[4923]: I1128 11:44:05.866452 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_1bddc188-1e43-4efd-9228-ac466ce69994/galera/0.log" Nov 28 11:44:05 crc kubenswrapper[4923]: I1128 11:44:05.914414 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_1bddc188-1e43-4efd-9228-ac466ce69994/mysql-bootstrap/0.log" Nov 28 11:44:05 crc kubenswrapper[4923]: I1128 11:44:05.944003 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_b55ac65c-e6ce-46ea-83cc-83afef1efcf9/mysql-bootstrap/0.log" Nov 28 11:44:06 crc kubenswrapper[4923]: I1128 11:44:06.091015 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_b55ac65c-e6ce-46ea-83cc-83afef1efcf9/mysql-bootstrap/0.log" Nov 28 11:44:06 crc kubenswrapper[4923]: I1128 11:44:06.170329 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_b55ac65c-e6ce-46ea-83cc-83afef1efcf9/galera/0.log" Nov 28 11:44:06 crc kubenswrapper[4923]: I1128 11:44:06.194054 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstackclient_dac93e05-77d1-4f77-8616-b47fee165189/openstackclient/0.log" Nov 28 11:44:06 crc kubenswrapper[4923]: I1128 11:44:06.516729 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-67x9j_817453fc-6da1-4525-85bf-0d8b22848ff1/ovn-controller/0.log" Nov 28 11:44:06 crc kubenswrapper[4923]: I1128 11:44:06.601603 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-8vd6b_5bb6b6a6-27ae-4724-bd56-4a8f3891595d/openstack-network-exporter/0.log" Nov 28 11:44:06 crc kubenswrapper[4923]: I1128 11:44:06.770532 4923 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_ovn-controller-ovs-6d9vf_92ee0076-8a34-461d-8af0-1e0739e91266/ovsdb-server-init/0.log" Nov 28 11:44:07 crc kubenswrapper[4923]: I1128 11:44:07.000137 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-6d9vf_92ee0076-8a34-461d-8af0-1e0739e91266/ovsdb-server-init/0.log" Nov 28 11:44:07 crc kubenswrapper[4923]: I1128 11:44:07.066006 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-6d9vf_92ee0076-8a34-461d-8af0-1e0739e91266/ovs-vswitchd/0.log" Nov 28 11:44:07 crc kubenswrapper[4923]: I1128 11:44:07.072040 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-6d9vf_92ee0076-8a34-461d-8af0-1e0739e91266/ovsdb-server/0.log" Nov 28 11:44:07 crc kubenswrapper[4923]: I1128 11:44:07.233842 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_c667717f-96df-453c-af9c-01743e6ec4e2/openstack-network-exporter/0.log" Nov 28 11:44:07 crc kubenswrapper[4923]: I1128 11:44:07.337284 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_c667717f-96df-453c-af9c-01743e6ec4e2/ovn-northd/0.log" Nov 28 11:44:07 crc kubenswrapper[4923]: I1128 11:44:07.443357 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_87cb4282-026f-4c9b-8854-c410a2751727/openstack-network-exporter/0.log" Nov 28 11:44:07 crc kubenswrapper[4923]: I1128 11:44:07.556616 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_87cb4282-026f-4c9b-8854-c410a2751727/ovsdbserver-nb/0.log" Nov 28 11:44:07 crc kubenswrapper[4923]: I1128 11:44:07.644149 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_ed74870d-915b-4790-9e30-02757e0c4e57/openstack-network-exporter/0.log" Nov 28 11:44:07 crc kubenswrapper[4923]: I1128 11:44:07.738793 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_ed74870d-915b-4790-9e30-02757e0c4e57/ovsdbserver-sb/0.log" Nov 28 11:44:07 crc kubenswrapper[4923]: I1128 11:44:07.914028 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-659684c4b8-cg62f_36d8d0f6-8ae1-43d3-8a95-9c703dfe37e2/placement-api/0.log" Nov 28 11:44:07 crc kubenswrapper[4923]: I1128 11:44:07.968535 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-659684c4b8-cg62f_36d8d0f6-8ae1-43d3-8a95-9c703dfe37e2/placement-log/0.log" Nov 28 11:44:08 crc kubenswrapper[4923]: I1128 11:44:08.234195 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_66114922-3d2e-40e1-9d35-84b0960ea5a2/setup-container/0.log" Nov 28 11:44:08 crc kubenswrapper[4923]: I1128 11:44:08.389330 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_66114922-3d2e-40e1-9d35-84b0960ea5a2/rabbitmq/0.log" Nov 28 11:44:08 crc kubenswrapper[4923]: I1128 11:44:08.403440 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_66114922-3d2e-40e1-9d35-84b0960ea5a2/setup-container/0.log" Nov 28 11:44:08 crc kubenswrapper[4923]: I1128 11:44:08.515486 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_d4b9d25a-9809-4c97-a1dd-37d779b158cf/setup-container/0.log" Nov 28 11:44:08 crc kubenswrapper[4923]: I1128 11:44:08.745429 4923 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_rabbitmq-server-0_d4b9d25a-9809-4c97-a1dd-37d779b158cf/setup-container/0.log" Nov 28 11:44:08 crc kubenswrapper[4923]: I1128 11:44:08.792171 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_d4b9d25a-9809-4c97-a1dd-37d779b158cf/rabbitmq/0.log" Nov 28 11:44:08 crc kubenswrapper[4923]: I1128 11:44:08.824519 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_reboot-os-edpm-deployment-openstack-edpm-ipam-c6dbl_0577aa66-e25a-4198-84a3-db8becccbbf6/reboot-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 28 11:44:08 crc kubenswrapper[4923]: I1128 11:44:08.871193 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_c444a612-9839-4189-be4c-955e0f964442/memcached/0.log" Nov 28 11:44:09 crc kubenswrapper[4923]: I1128 11:44:09.009817 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_repo-setup-edpm-deployment-openstack-edpm-ipam-ddcpw_8298cf34-2702-46ec-a4e0-002988266a81/repo-setup-edpm-deployment-openstack-edpm-ipam/0.log" Nov 28 11:44:09 crc kubenswrapper[4923]: I1128 11:44:09.112655 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_run-os-edpm-deployment-openstack-edpm-ipam-mtsmh_97a4f32b-623f-4eda-9418-9edc6c64a043/run-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 28 11:44:09 crc kubenswrapper[4923]: I1128 11:44:09.247822 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ssh-known-hosts-edpm-deployment-6tcd6_e0fd4e3b-71f7-4788-a7b5-6c23adc4cf11/ssh-known-hosts-edpm-deployment/0.log" Nov 28 11:44:09 crc kubenswrapper[4923]: I1128 11:44:09.300808 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_validate-network-edpm-deployment-openstack-edpm-ipam-4dhrm_f7e0da2f-9257-45b6-be10-0e7c9daa73ab/validate-network-edpm-deployment-openstack-edpm-ipam/0.log" Nov 28 11:44:32 crc kubenswrapper[4923]: I1128 11:44:32.115315 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_407a49296837c8c1ba2ba3d7e1a48e4734f20cac0b622a348cb10970b8zbzwl_f0dbc892-54ce-4fdc-8989-a68112853524/util/0.log" Nov 28 11:44:32 crc kubenswrapper[4923]: I1128 11:44:32.359223 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_407a49296837c8c1ba2ba3d7e1a48e4734f20cac0b622a348cb10970b8zbzwl_f0dbc892-54ce-4fdc-8989-a68112853524/util/0.log" Nov 28 11:44:32 crc kubenswrapper[4923]: I1128 11:44:32.377487 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_407a49296837c8c1ba2ba3d7e1a48e4734f20cac0b622a348cb10970b8zbzwl_f0dbc892-54ce-4fdc-8989-a68112853524/pull/0.log" Nov 28 11:44:32 crc kubenswrapper[4923]: I1128 11:44:32.423503 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_407a49296837c8c1ba2ba3d7e1a48e4734f20cac0b622a348cb10970b8zbzwl_f0dbc892-54ce-4fdc-8989-a68112853524/pull/0.log" Nov 28 11:44:32 crc kubenswrapper[4923]: I1128 11:44:32.572770 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_407a49296837c8c1ba2ba3d7e1a48e4734f20cac0b622a348cb10970b8zbzwl_f0dbc892-54ce-4fdc-8989-a68112853524/pull/0.log" Nov 28 11:44:32 crc kubenswrapper[4923]: I1128 11:44:32.575337 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_407a49296837c8c1ba2ba3d7e1a48e4734f20cac0b622a348cb10970b8zbzwl_f0dbc892-54ce-4fdc-8989-a68112853524/util/0.log" Nov 28 11:44:32 crc kubenswrapper[4923]: I1128 11:44:32.617847 4923 log.go:25] "Finished parsing 
log file" path="/var/log/pods/openstack-operators_407a49296837c8c1ba2ba3d7e1a48e4734f20cac0b622a348cb10970b8zbzwl_f0dbc892-54ce-4fdc-8989-a68112853524/extract/0.log" Nov 28 11:44:32 crc kubenswrapper[4923]: I1128 11:44:32.770603 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7b64f4fb85-kbm2r_eb007735-97dd-4d13-9b3d-28adefb557e1/kube-rbac-proxy/0.log" Nov 28 11:44:32 crc kubenswrapper[4923]: I1128 11:44:32.828276 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7b64f4fb85-kbm2r_eb007735-97dd-4d13-9b3d-28adefb557e1/manager/0.log" Nov 28 11:44:32 crc kubenswrapper[4923]: I1128 11:44:32.898786 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-6b7f75547b-p9k2s_09a1376c-00d7-4540-a905-078c297241cb/kube-rbac-proxy/0.log" Nov 28 11:44:33 crc kubenswrapper[4923]: I1128 11:44:33.009029 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-6b7f75547b-p9k2s_09a1376c-00d7-4540-a905-078c297241cb/manager/0.log" Nov 28 11:44:33 crc kubenswrapper[4923]: I1128 11:44:33.107400 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-955677c94-8ftq2_e469dd36-fba4-4342-8fd6-ef847f821393/kube-rbac-proxy/0.log" Nov 28 11:44:33 crc kubenswrapper[4923]: I1128 11:44:33.142244 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-955677c94-8ftq2_e469dd36-fba4-4342-8fd6-ef847f821393/manager/0.log" Nov 28 11:44:33 crc kubenswrapper[4923]: I1128 11:44:33.284414 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-589cbd6b5b-kmtzj_2ef6846b-733b-4c63-8add-5c3251658a7e/kube-rbac-proxy/0.log" Nov 28 11:44:33 crc kubenswrapper[4923]: I1128 11:44:33.417789 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-589cbd6b5b-kmtzj_2ef6846b-733b-4c63-8add-5c3251658a7e/manager/0.log" Nov 28 11:44:33 crc kubenswrapper[4923]: I1128 11:44:33.555545 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-5b77f656f-jtpmz_2ea0b3c3-a5d4-4b2f-81ef-a52573d37e06/kube-rbac-proxy/0.log" Nov 28 11:44:33 crc kubenswrapper[4923]: I1128 11:44:33.566054 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-5b77f656f-jtpmz_2ea0b3c3-a5d4-4b2f-81ef-a52573d37e06/manager/0.log" Nov 28 11:44:33 crc kubenswrapper[4923]: I1128 11:44:33.648834 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-5d494799bf-98w4g_a813ab03-9734-4a76-aef0-62c7606c85d5/kube-rbac-proxy/0.log" Nov 28 11:44:33 crc kubenswrapper[4923]: I1128 11:44:33.765380 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-5d494799bf-98w4g_a813ab03-9734-4a76-aef0-62c7606c85d5/manager/0.log" Nov 28 11:44:33 crc kubenswrapper[4923]: I1128 11:44:33.818035 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-57548d458d-2snwf_ec4c9bb0-95fa-4840-8b48-de2b822bb788/kube-rbac-proxy/0.log" Nov 28 11:44:33 crc kubenswrapper[4923]: I1128 11:44:33.960701 
4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-57548d458d-2snwf_ec4c9bb0-95fa-4840-8b48-de2b822bb788/manager/0.log" Nov 28 11:44:34 crc kubenswrapper[4923]: I1128 11:44:34.009369 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-67cb4dc6d4-j72jx_52b8b64e-5401-41ef-8d65-cc275cdaf832/kube-rbac-proxy/0.log" Nov 28 11:44:34 crc kubenswrapper[4923]: I1128 11:44:34.072079 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-67cb4dc6d4-j72jx_52b8b64e-5401-41ef-8d65-cc275cdaf832/manager/0.log" Nov 28 11:44:34 crc kubenswrapper[4923]: I1128 11:44:34.209697 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7b4567c7cf-sk9fr_c434efb7-70cf-4c94-be0d-9635325d758c/kube-rbac-proxy/0.log" Nov 28 11:44:34 crc kubenswrapper[4923]: I1128 11:44:34.337229 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7b4567c7cf-sk9fr_c434efb7-70cf-4c94-be0d-9635325d758c/manager/0.log" Nov 28 11:44:34 crc kubenswrapper[4923]: I1128 11:44:34.444969 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-5d499bf58b-v8vn8_ed2b1137-a903-4224-b706-304a2f416007/kube-rbac-proxy/0.log" Nov 28 11:44:34 crc kubenswrapper[4923]: I1128 11:44:34.494486 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-5d499bf58b-v8vn8_ed2b1137-a903-4224-b706-304a2f416007/manager/0.log" Nov 28 11:44:34 crc kubenswrapper[4923]: I1128 11:44:34.528354 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-66f4dd4bc7-qc4bb_bd6c3e5b-2eb9-4f4b-8893-07aab7091fab/kube-rbac-proxy/0.log" Nov 28 11:44:34 crc kubenswrapper[4923]: I1128 11:44:34.649610 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-66f4dd4bc7-qc4bb_bd6c3e5b-2eb9-4f4b-8893-07aab7091fab/manager/0.log" Nov 28 11:44:34 crc kubenswrapper[4923]: I1128 11:44:34.716654 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-6fdcddb789-9z8n7_5d041894-6ce7-401f-9b0b-5d5a9e31a68d/kube-rbac-proxy/0.log" Nov 28 11:44:34 crc kubenswrapper[4923]: I1128 11:44:34.754720 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-6fdcddb789-9z8n7_5d041894-6ce7-401f-9b0b-5d5a9e31a68d/manager/0.log" Nov 28 11:44:34 crc kubenswrapper[4923]: I1128 11:44:34.937798 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-79556f57fc-vvgng_f9c94487-8e74-4cc0-ac40-834c175a770f/kube-rbac-proxy/0.log" Nov 28 11:44:34 crc kubenswrapper[4923]: I1128 11:44:34.996438 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-79556f57fc-vvgng_f9c94487-8e74-4cc0-ac40-834c175a770f/manager/0.log" Nov 28 11:44:35 crc kubenswrapper[4923]: I1128 11:44:35.078568 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-64cdc6ff96-ktqqj_c2fd1946-a3cb-453e-a1f3-458e14cb35ec/kube-rbac-proxy/0.log" Nov 28 11:44:35 crc kubenswrapper[4923]: 
I1128 11:44:35.178957 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-64cdc6ff96-ktqqj_c2fd1946-a3cb-453e-a1f3-458e14cb35ec/manager/0.log" Nov 28 11:44:35 crc kubenswrapper[4923]: I1128 11:44:35.293129 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-5fcdb54b6bz5nqx_4c92eac3-127a-4b96-adf3-e3e52ba9015d/kube-rbac-proxy/0.log" Nov 28 11:44:35 crc kubenswrapper[4923]: I1128 11:44:35.301892 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-5fcdb54b6bz5nqx_4c92eac3-127a-4b96-adf3-e3e52ba9015d/manager/0.log" Nov 28 11:44:35 crc kubenswrapper[4923]: I1128 11:44:35.750135 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-7f586794b9-kkz9j_6a8160fc-9979-4e9e-a289-2c4e0a728f1c/operator/0.log" Nov 28 11:44:36 crc kubenswrapper[4923]: I1128 11:44:36.059575 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-nmwq5_7493be0a-26b4-4c54-b0dd-456f39fe357e/registry-server/0.log" Nov 28 11:44:36 crc kubenswrapper[4923]: I1128 11:44:36.175144 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-6fbf799579-4qrnz_7d3c8ccd-8582-467e-9017-4f08eaac26ab/manager/0.log" Nov 28 11:44:36 crc kubenswrapper[4923]: I1128 11:44:36.214081 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-56897c768d-sv67d_e00c03c3-987c-4a5a-9c6f-2d15cd86a639/kube-rbac-proxy/0.log" Nov 28 11:44:36 crc kubenswrapper[4923]: I1128 11:44:36.326326 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-56897c768d-sv67d_e00c03c3-987c-4a5a-9c6f-2d15cd86a639/manager/0.log" Nov 28 11:44:36 crc kubenswrapper[4923]: I1128 11:44:36.549389 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-57988cc5b5-28rch_b5fe44dd-beba-450d-a04a-59f3046ab0bb/manager/0.log" Nov 28 11:44:36 crc kubenswrapper[4923]: I1128 11:44:36.570768 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-57988cc5b5-28rch_b5fe44dd-beba-450d-a04a-59f3046ab0bb/kube-rbac-proxy/0.log" Nov 28 11:44:36 crc kubenswrapper[4923]: I1128 11:44:36.694006 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-zl5xb_44d18a2d-97f3-4e4a-82bf-6de8634c7585/operator/0.log" Nov 28 11:44:36 crc kubenswrapper[4923]: I1128 11:44:36.921191 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-d77b94747-t9tvv_f193a1a9-1c5d-4d16-a9c1-3a17530bed74/manager/0.log" Nov 28 11:44:37 crc kubenswrapper[4923]: I1128 11:44:37.002656 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-d77b94747-t9tvv_f193a1a9-1c5d-4d16-a9c1-3a17530bed74/kube-rbac-proxy/0.log" Nov 28 11:44:37 crc kubenswrapper[4923]: I1128 11:44:37.091670 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-76cc84c6bb-5vgzd_1745a114-0278-4e37-9f5a-34ccaa421f19/manager/0.log" Nov 28 11:44:37 crc 
kubenswrapper[4923]: I1128 11:44:37.126372 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-76cc84c6bb-5vgzd_1745a114-0278-4e37-9f5a-34ccaa421f19/kube-rbac-proxy/0.log" Nov 28 11:44:37 crc kubenswrapper[4923]: I1128 11:44:37.305639 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5cd6c7f4c8-2tvqk_f6fd5062-da63-4fb0-bb4d-80643cb85ca7/kube-rbac-proxy/0.log" Nov 28 11:44:37 crc kubenswrapper[4923]: I1128 11:44:37.411048 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-656dcb59d4-646x4_4a5bf8f0-87dd-4e04-bd23-8379f541b020/kube-rbac-proxy/0.log" Nov 28 11:44:37 crc kubenswrapper[4923]: I1128 11:44:37.440088 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5cd6c7f4c8-2tvqk_f6fd5062-da63-4fb0-bb4d-80643cb85ca7/manager/0.log" Nov 28 11:44:37 crc kubenswrapper[4923]: I1128 11:44:37.486782 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-656dcb59d4-646x4_4a5bf8f0-87dd-4e04-bd23-8379f541b020/manager/0.log" Nov 28 11:44:57 crc kubenswrapper[4923]: I1128 11:44:57.047792 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-766xq"] Nov 28 11:44:57 crc kubenswrapper[4923]: E1128 11:44:57.050402 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="53cfe1c3-4b9a-43b1-8266-c8a6f23690d3" containerName="container-00" Nov 28 11:44:57 crc kubenswrapper[4923]: I1128 11:44:57.050431 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="53cfe1c3-4b9a-43b1-8266-c8a6f23690d3" containerName="container-00" Nov 28 11:44:57 crc kubenswrapper[4923]: I1128 11:44:57.050641 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="53cfe1c3-4b9a-43b1-8266-c8a6f23690d3" containerName="container-00" Nov 28 11:44:57 crc kubenswrapper[4923]: I1128 11:44:57.052134 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-766xq" Nov 28 11:44:57 crc kubenswrapper[4923]: I1128 11:44:57.098634 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-766xq"] Nov 28 11:44:57 crc kubenswrapper[4923]: I1128 11:44:57.247165 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5534c806-78b7-4bc3-8274-9e651e883c9b-utilities\") pod \"redhat-marketplace-766xq\" (UID: \"5534c806-78b7-4bc3-8274-9e651e883c9b\") " pod="openshift-marketplace/redhat-marketplace-766xq" Nov 28 11:44:57 crc kubenswrapper[4923]: I1128 11:44:57.247991 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8vhbh\" (UniqueName: \"kubernetes.io/projected/5534c806-78b7-4bc3-8274-9e651e883c9b-kube-api-access-8vhbh\") pod \"redhat-marketplace-766xq\" (UID: \"5534c806-78b7-4bc3-8274-9e651e883c9b\") " pod="openshift-marketplace/redhat-marketplace-766xq" Nov 28 11:44:57 crc kubenswrapper[4923]: I1128 11:44:57.248073 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5534c806-78b7-4bc3-8274-9e651e883c9b-catalog-content\") pod \"redhat-marketplace-766xq\" (UID: \"5534c806-78b7-4bc3-8274-9e651e883c9b\") " pod="openshift-marketplace/redhat-marketplace-766xq" Nov 28 11:44:57 crc kubenswrapper[4923]: I1128 11:44:57.349050 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8vhbh\" (UniqueName: \"kubernetes.io/projected/5534c806-78b7-4bc3-8274-9e651e883c9b-kube-api-access-8vhbh\") pod \"redhat-marketplace-766xq\" (UID: \"5534c806-78b7-4bc3-8274-9e651e883c9b\") " pod="openshift-marketplace/redhat-marketplace-766xq" Nov 28 11:44:57 crc kubenswrapper[4923]: I1128 11:44:57.349119 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5534c806-78b7-4bc3-8274-9e651e883c9b-catalog-content\") pod \"redhat-marketplace-766xq\" (UID: \"5534c806-78b7-4bc3-8274-9e651e883c9b\") " pod="openshift-marketplace/redhat-marketplace-766xq" Nov 28 11:44:57 crc kubenswrapper[4923]: I1128 11:44:57.349194 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5534c806-78b7-4bc3-8274-9e651e883c9b-utilities\") pod \"redhat-marketplace-766xq\" (UID: \"5534c806-78b7-4bc3-8274-9e651e883c9b\") " pod="openshift-marketplace/redhat-marketplace-766xq" Nov 28 11:44:57 crc kubenswrapper[4923]: I1128 11:44:57.349732 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5534c806-78b7-4bc3-8274-9e651e883c9b-utilities\") pod \"redhat-marketplace-766xq\" (UID: \"5534c806-78b7-4bc3-8274-9e651e883c9b\") " pod="openshift-marketplace/redhat-marketplace-766xq" Nov 28 11:44:57 crc kubenswrapper[4923]: I1128 11:44:57.350989 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5534c806-78b7-4bc3-8274-9e651e883c9b-catalog-content\") pod \"redhat-marketplace-766xq\" (UID: \"5534c806-78b7-4bc3-8274-9e651e883c9b\") " pod="openshift-marketplace/redhat-marketplace-766xq" Nov 28 11:44:57 crc kubenswrapper[4923]: I1128 11:44:57.380872 4923 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-8vhbh\" (UniqueName: \"kubernetes.io/projected/5534c806-78b7-4bc3-8274-9e651e883c9b-kube-api-access-8vhbh\") pod \"redhat-marketplace-766xq\" (UID: \"5534c806-78b7-4bc3-8274-9e651e883c9b\") " pod="openshift-marketplace/redhat-marketplace-766xq" Nov 28 11:44:57 crc kubenswrapper[4923]: I1128 11:44:57.680207 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-766xq" Nov 28 11:44:58 crc kubenswrapper[4923]: I1128 11:44:58.181393 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-766xq"] Nov 28 11:44:58 crc kubenswrapper[4923]: I1128 11:44:58.607593 4923 generic.go:334] "Generic (PLEG): container finished" podID="5534c806-78b7-4bc3-8274-9e651e883c9b" containerID="7adc0db8652045c3c43e27636aa2e92ee2b00ac8a038610bf970cfaefc6f2da6" exitCode=0 Nov 28 11:44:58 crc kubenswrapper[4923]: I1128 11:44:58.607639 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-766xq" event={"ID":"5534c806-78b7-4bc3-8274-9e651e883c9b","Type":"ContainerDied","Data":"7adc0db8652045c3c43e27636aa2e92ee2b00ac8a038610bf970cfaefc6f2da6"} Nov 28 11:44:58 crc kubenswrapper[4923]: I1128 11:44:58.607672 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-766xq" event={"ID":"5534c806-78b7-4bc3-8274-9e651e883c9b","Type":"ContainerStarted","Data":"d35baa1225f7d86522ae84dd21f63c140542d9e54ba4b3225dda6209a0eaad0c"} Nov 28 11:44:59 crc kubenswrapper[4923]: I1128 11:44:59.207201 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-f4dfh_3a41216a-9d26-4691-aa6b-8a50c0a94016/control-plane-machine-set-operator/0.log" Nov 28 11:44:59 crc kubenswrapper[4923]: I1128 11:44:59.417404 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-m4snv_c3641d9d-5e9b-40a7-90ec-4fa7b3f42a4b/machine-api-operator/0.log" Nov 28 11:44:59 crc kubenswrapper[4923]: I1128 11:44:59.439895 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-m4snv_c3641d9d-5e9b-40a7-90ec-4fa7b3f42a4b/kube-rbac-proxy/0.log" Nov 28 11:45:00 crc kubenswrapper[4923]: I1128 11:45:00.135881 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405505-nx99q"] Nov 28 11:45:00 crc kubenswrapper[4923]: I1128 11:45:00.137237 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405505-nx99q" Nov 28 11:45:00 crc kubenswrapper[4923]: I1128 11:45:00.144003 4923 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 28 11:45:00 crc kubenswrapper[4923]: I1128 11:45:00.144012 4923 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 28 11:45:00 crc kubenswrapper[4923]: I1128 11:45:00.166270 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405505-nx99q"] Nov 28 11:45:00 crc kubenswrapper[4923]: I1128 11:45:00.299456 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cmpqz\" (UniqueName: \"kubernetes.io/projected/cb49ceb3-8c67-4465-90af-fbc481fb2339-kube-api-access-cmpqz\") pod \"collect-profiles-29405505-nx99q\" (UID: \"cb49ceb3-8c67-4465-90af-fbc481fb2339\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405505-nx99q" Nov 28 11:45:00 crc kubenswrapper[4923]: I1128 11:45:00.299518 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/cb49ceb3-8c67-4465-90af-fbc481fb2339-secret-volume\") pod \"collect-profiles-29405505-nx99q\" (UID: \"cb49ceb3-8c67-4465-90af-fbc481fb2339\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405505-nx99q" Nov 28 11:45:00 crc kubenswrapper[4923]: I1128 11:45:00.299538 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/cb49ceb3-8c67-4465-90af-fbc481fb2339-config-volume\") pod \"collect-profiles-29405505-nx99q\" (UID: \"cb49ceb3-8c67-4465-90af-fbc481fb2339\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405505-nx99q" Nov 28 11:45:00 crc kubenswrapper[4923]: I1128 11:45:00.401302 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cmpqz\" (UniqueName: \"kubernetes.io/projected/cb49ceb3-8c67-4465-90af-fbc481fb2339-kube-api-access-cmpqz\") pod \"collect-profiles-29405505-nx99q\" (UID: \"cb49ceb3-8c67-4465-90af-fbc481fb2339\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405505-nx99q" Nov 28 11:45:00 crc kubenswrapper[4923]: I1128 11:45:00.401372 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/cb49ceb3-8c67-4465-90af-fbc481fb2339-secret-volume\") pod \"collect-profiles-29405505-nx99q\" (UID: \"cb49ceb3-8c67-4465-90af-fbc481fb2339\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405505-nx99q" Nov 28 11:45:00 crc kubenswrapper[4923]: I1128 11:45:00.401396 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/cb49ceb3-8c67-4465-90af-fbc481fb2339-config-volume\") pod \"collect-profiles-29405505-nx99q\" (UID: \"cb49ceb3-8c67-4465-90af-fbc481fb2339\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405505-nx99q" Nov 28 11:45:00 crc kubenswrapper[4923]: I1128 11:45:00.402528 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/cb49ceb3-8c67-4465-90af-fbc481fb2339-config-volume\") pod 
\"collect-profiles-29405505-nx99q\" (UID: \"cb49ceb3-8c67-4465-90af-fbc481fb2339\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405505-nx99q" Nov 28 11:45:00 crc kubenswrapper[4923]: I1128 11:45:00.407004 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/cb49ceb3-8c67-4465-90af-fbc481fb2339-secret-volume\") pod \"collect-profiles-29405505-nx99q\" (UID: \"cb49ceb3-8c67-4465-90af-fbc481fb2339\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405505-nx99q" Nov 28 11:45:00 crc kubenswrapper[4923]: I1128 11:45:00.423826 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cmpqz\" (UniqueName: \"kubernetes.io/projected/cb49ceb3-8c67-4465-90af-fbc481fb2339-kube-api-access-cmpqz\") pod \"collect-profiles-29405505-nx99q\" (UID: \"cb49ceb3-8c67-4465-90af-fbc481fb2339\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405505-nx99q" Nov 28 11:45:00 crc kubenswrapper[4923]: I1128 11:45:00.460275 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405505-nx99q" Nov 28 11:45:00 crc kubenswrapper[4923]: I1128 11:45:00.639052 4923 generic.go:334] "Generic (PLEG): container finished" podID="5534c806-78b7-4bc3-8274-9e651e883c9b" containerID="5eec22dad361382ca16708d919637519aa16e63e4c00c7a0d8ab60234f01c221" exitCode=0 Nov 28 11:45:00 crc kubenswrapper[4923]: I1128 11:45:00.639091 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-766xq" event={"ID":"5534c806-78b7-4bc3-8274-9e651e883c9b","Type":"ContainerDied","Data":"5eec22dad361382ca16708d919637519aa16e63e4c00c7a0d8ab60234f01c221"} Nov 28 11:45:00 crc kubenswrapper[4923]: I1128 11:45:00.940838 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405505-nx99q"] Nov 28 11:45:00 crc kubenswrapper[4923]: W1128 11:45:00.942456 4923 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcb49ceb3_8c67_4465_90af_fbc481fb2339.slice/crio-d48dbbef24d6c7eae6c6f41c821285f2c3beeb202c68db653197ff0c105968cf WatchSource:0}: Error finding container d48dbbef24d6c7eae6c6f41c821285f2c3beeb202c68db653197ff0c105968cf: Status 404 returned error can't find the container with id d48dbbef24d6c7eae6c6f41c821285f2c3beeb202c68db653197ff0c105968cf Nov 28 11:45:01 crc kubenswrapper[4923]: I1128 11:45:01.647722 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-766xq" event={"ID":"5534c806-78b7-4bc3-8274-9e651e883c9b","Type":"ContainerStarted","Data":"1ad3cdeb5e96f7cb8be872ec20f7d9f1f2d09b423c2f6c259ab94856425ffab1"} Nov 28 11:45:01 crc kubenswrapper[4923]: I1128 11:45:01.650523 4923 generic.go:334] "Generic (PLEG): container finished" podID="cb49ceb3-8c67-4465-90af-fbc481fb2339" containerID="2fe7df4d8d419fd1897ca90207841025fab67f4bfedbc6b70beb883889b7b01b" exitCode=0 Nov 28 11:45:01 crc kubenswrapper[4923]: I1128 11:45:01.650563 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405505-nx99q" event={"ID":"cb49ceb3-8c67-4465-90af-fbc481fb2339","Type":"ContainerDied","Data":"2fe7df4d8d419fd1897ca90207841025fab67f4bfedbc6b70beb883889b7b01b"} Nov 28 11:45:01 crc kubenswrapper[4923]: I1128 11:45:01.650600 4923 kubelet.go:2453] "SyncLoop (PLEG): event for 
pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405505-nx99q" event={"ID":"cb49ceb3-8c67-4465-90af-fbc481fb2339","Type":"ContainerStarted","Data":"d48dbbef24d6c7eae6c6f41c821285f2c3beeb202c68db653197ff0c105968cf"} Nov 28 11:45:01 crc kubenswrapper[4923]: I1128 11:45:01.666347 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-766xq" podStartSLOduration=2.147316686 podStartE2EDuration="4.666335198s" podCreationTimestamp="2025-11-28 11:44:57 +0000 UTC" firstStartedPulling="2025-11-28 11:44:58.615747593 +0000 UTC m=+2177.744431813" lastFinishedPulling="2025-11-28 11:45:01.134766115 +0000 UTC m=+2180.263450325" observedRunningTime="2025-11-28 11:45:01.662779337 +0000 UTC m=+2180.791463547" watchObservedRunningTime="2025-11-28 11:45:01.666335198 +0000 UTC m=+2180.795019398" Nov 28 11:45:03 crc kubenswrapper[4923]: I1128 11:45:03.002963 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405505-nx99q" Nov 28 11:45:03 crc kubenswrapper[4923]: I1128 11:45:03.151832 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/cb49ceb3-8c67-4465-90af-fbc481fb2339-config-volume\") pod \"cb49ceb3-8c67-4465-90af-fbc481fb2339\" (UID: \"cb49ceb3-8c67-4465-90af-fbc481fb2339\") " Nov 28 11:45:03 crc kubenswrapper[4923]: I1128 11:45:03.152083 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cmpqz\" (UniqueName: \"kubernetes.io/projected/cb49ceb3-8c67-4465-90af-fbc481fb2339-kube-api-access-cmpqz\") pod \"cb49ceb3-8c67-4465-90af-fbc481fb2339\" (UID: \"cb49ceb3-8c67-4465-90af-fbc481fb2339\") " Nov 28 11:45:03 crc kubenswrapper[4923]: I1128 11:45:03.152108 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/cb49ceb3-8c67-4465-90af-fbc481fb2339-secret-volume\") pod \"cb49ceb3-8c67-4465-90af-fbc481fb2339\" (UID: \"cb49ceb3-8c67-4465-90af-fbc481fb2339\") " Nov 28 11:45:03 crc kubenswrapper[4923]: I1128 11:45:03.152502 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cb49ceb3-8c67-4465-90af-fbc481fb2339-config-volume" (OuterVolumeSpecName: "config-volume") pod "cb49ceb3-8c67-4465-90af-fbc481fb2339" (UID: "cb49ceb3-8c67-4465-90af-fbc481fb2339"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:45:03 crc kubenswrapper[4923]: I1128 11:45:03.157130 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cb49ceb3-8c67-4465-90af-fbc481fb2339-kube-api-access-cmpqz" (OuterVolumeSpecName: "kube-api-access-cmpqz") pod "cb49ceb3-8c67-4465-90af-fbc481fb2339" (UID: "cb49ceb3-8c67-4465-90af-fbc481fb2339"). InnerVolumeSpecName "kube-api-access-cmpqz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:45:03 crc kubenswrapper[4923]: I1128 11:45:03.158047 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cb49ceb3-8c67-4465-90af-fbc481fb2339-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "cb49ceb3-8c67-4465-90af-fbc481fb2339" (UID: "cb49ceb3-8c67-4465-90af-fbc481fb2339"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:45:03 crc kubenswrapper[4923]: I1128 11:45:03.254910 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cmpqz\" (UniqueName: \"kubernetes.io/projected/cb49ceb3-8c67-4465-90af-fbc481fb2339-kube-api-access-cmpqz\") on node \"crc\" DevicePath \"\"" Nov 28 11:45:03 crc kubenswrapper[4923]: I1128 11:45:03.254972 4923 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/cb49ceb3-8c67-4465-90af-fbc481fb2339-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 28 11:45:03 crc kubenswrapper[4923]: I1128 11:45:03.254988 4923 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/cb49ceb3-8c67-4465-90af-fbc481fb2339-config-volume\") on node \"crc\" DevicePath \"\"" Nov 28 11:45:03 crc kubenswrapper[4923]: I1128 11:45:03.672070 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405505-nx99q" event={"ID":"cb49ceb3-8c67-4465-90af-fbc481fb2339","Type":"ContainerDied","Data":"d48dbbef24d6c7eae6c6f41c821285f2c3beeb202c68db653197ff0c105968cf"} Nov 28 11:45:03 crc kubenswrapper[4923]: I1128 11:45:03.672138 4923 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d48dbbef24d6c7eae6c6f41c821285f2c3beeb202c68db653197ff0c105968cf" Nov 28 11:45:03 crc kubenswrapper[4923]: I1128 11:45:03.672148 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405505-nx99q" Nov 28 11:45:04 crc kubenswrapper[4923]: I1128 11:45:04.094819 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405460-778cj"] Nov 28 11:45:04 crc kubenswrapper[4923]: I1128 11:45:04.104745 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405460-778cj"] Nov 28 11:45:05 crc kubenswrapper[4923]: I1128 11:45:05.183851 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ac7e8c4a-3957-4b3f-a6ce-968bb42f2a89" path="/var/lib/kubelet/pods/ac7e8c4a-3957-4b3f-a6ce-968bb42f2a89/volumes" Nov 28 11:45:07 crc kubenswrapper[4923]: I1128 11:45:07.680355 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-766xq" Nov 28 11:45:07 crc kubenswrapper[4923]: I1128 11:45:07.680920 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-766xq" Nov 28 11:45:07 crc kubenswrapper[4923]: I1128 11:45:07.733473 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-766xq" Nov 28 11:45:07 crc kubenswrapper[4923]: I1128 11:45:07.795303 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-766xq" Nov 28 11:45:08 crc kubenswrapper[4923]: I1128 11:45:08.300127 4923 scope.go:117] "RemoveContainer" containerID="73a41071dd19a6a0e8a0f6f1a9488c6407d59ab295ef40752949d8cabd9b61ba" Nov 28 11:45:09 crc kubenswrapper[4923]: I1128 11:45:09.519211 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-766xq"] Nov 28 11:45:09 crc kubenswrapper[4923]: I1128 11:45:09.761406 4923 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openshift-marketplace/redhat-marketplace-766xq" podUID="5534c806-78b7-4bc3-8274-9e651e883c9b" containerName="registry-server" containerID="cri-o://1ad3cdeb5e96f7cb8be872ec20f7d9f1f2d09b423c2f6c259ab94856425ffab1" gracePeriod=2 Nov 28 11:45:10 crc kubenswrapper[4923]: I1128 11:45:10.281331 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-766xq" Nov 28 11:45:10 crc kubenswrapper[4923]: I1128 11:45:10.390525 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8vhbh\" (UniqueName: \"kubernetes.io/projected/5534c806-78b7-4bc3-8274-9e651e883c9b-kube-api-access-8vhbh\") pod \"5534c806-78b7-4bc3-8274-9e651e883c9b\" (UID: \"5534c806-78b7-4bc3-8274-9e651e883c9b\") " Nov 28 11:45:10 crc kubenswrapper[4923]: I1128 11:45:10.390836 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5534c806-78b7-4bc3-8274-9e651e883c9b-utilities\") pod \"5534c806-78b7-4bc3-8274-9e651e883c9b\" (UID: \"5534c806-78b7-4bc3-8274-9e651e883c9b\") " Nov 28 11:45:10 crc kubenswrapper[4923]: I1128 11:45:10.390868 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5534c806-78b7-4bc3-8274-9e651e883c9b-catalog-content\") pod \"5534c806-78b7-4bc3-8274-9e651e883c9b\" (UID: \"5534c806-78b7-4bc3-8274-9e651e883c9b\") " Nov 28 11:45:10 crc kubenswrapper[4923]: I1128 11:45:10.391655 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5534c806-78b7-4bc3-8274-9e651e883c9b-utilities" (OuterVolumeSpecName: "utilities") pod "5534c806-78b7-4bc3-8274-9e651e883c9b" (UID: "5534c806-78b7-4bc3-8274-9e651e883c9b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:45:10 crc kubenswrapper[4923]: I1128 11:45:10.397973 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5534c806-78b7-4bc3-8274-9e651e883c9b-kube-api-access-8vhbh" (OuterVolumeSpecName: "kube-api-access-8vhbh") pod "5534c806-78b7-4bc3-8274-9e651e883c9b" (UID: "5534c806-78b7-4bc3-8274-9e651e883c9b"). InnerVolumeSpecName "kube-api-access-8vhbh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:45:10 crc kubenswrapper[4923]: I1128 11:45:10.410633 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5534c806-78b7-4bc3-8274-9e651e883c9b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5534c806-78b7-4bc3-8274-9e651e883c9b" (UID: "5534c806-78b7-4bc3-8274-9e651e883c9b"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:45:10 crc kubenswrapper[4923]: I1128 11:45:10.492626 4923 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5534c806-78b7-4bc3-8274-9e651e883c9b-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 11:45:10 crc kubenswrapper[4923]: I1128 11:45:10.492660 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8vhbh\" (UniqueName: \"kubernetes.io/projected/5534c806-78b7-4bc3-8274-9e651e883c9b-kube-api-access-8vhbh\") on node \"crc\" DevicePath \"\"" Nov 28 11:45:10 crc kubenswrapper[4923]: I1128 11:45:10.492672 4923 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5534c806-78b7-4bc3-8274-9e651e883c9b-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 11:45:10 crc kubenswrapper[4923]: I1128 11:45:10.770966 4923 generic.go:334] "Generic (PLEG): container finished" podID="5534c806-78b7-4bc3-8274-9e651e883c9b" containerID="1ad3cdeb5e96f7cb8be872ec20f7d9f1f2d09b423c2f6c259ab94856425ffab1" exitCode=0 Nov 28 11:45:10 crc kubenswrapper[4923]: I1128 11:45:10.771007 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-766xq" event={"ID":"5534c806-78b7-4bc3-8274-9e651e883c9b","Type":"ContainerDied","Data":"1ad3cdeb5e96f7cb8be872ec20f7d9f1f2d09b423c2f6c259ab94856425ffab1"} Nov 28 11:45:10 crc kubenswrapper[4923]: I1128 11:45:10.771038 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-766xq" event={"ID":"5534c806-78b7-4bc3-8274-9e651e883c9b","Type":"ContainerDied","Data":"d35baa1225f7d86522ae84dd21f63c140542d9e54ba4b3225dda6209a0eaad0c"} Nov 28 11:45:10 crc kubenswrapper[4923]: I1128 11:45:10.771056 4923 scope.go:117] "RemoveContainer" containerID="1ad3cdeb5e96f7cb8be872ec20f7d9f1f2d09b423c2f6c259ab94856425ffab1" Nov 28 11:45:10 crc kubenswrapper[4923]: I1128 11:45:10.772251 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-766xq" Nov 28 11:45:10 crc kubenswrapper[4923]: I1128 11:45:10.790599 4923 scope.go:117] "RemoveContainer" containerID="5eec22dad361382ca16708d919637519aa16e63e4c00c7a0d8ab60234f01c221" Nov 28 11:45:10 crc kubenswrapper[4923]: I1128 11:45:10.808261 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-766xq"] Nov 28 11:45:10 crc kubenswrapper[4923]: I1128 11:45:10.815604 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-766xq"] Nov 28 11:45:10 crc kubenswrapper[4923]: I1128 11:45:10.820801 4923 scope.go:117] "RemoveContainer" containerID="7adc0db8652045c3c43e27636aa2e92ee2b00ac8a038610bf970cfaefc6f2da6" Nov 28 11:45:10 crc kubenswrapper[4923]: I1128 11:45:10.855468 4923 scope.go:117] "RemoveContainer" containerID="1ad3cdeb5e96f7cb8be872ec20f7d9f1f2d09b423c2f6c259ab94856425ffab1" Nov 28 11:45:10 crc kubenswrapper[4923]: E1128 11:45:10.855948 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1ad3cdeb5e96f7cb8be872ec20f7d9f1f2d09b423c2f6c259ab94856425ffab1\": container with ID starting with 1ad3cdeb5e96f7cb8be872ec20f7d9f1f2d09b423c2f6c259ab94856425ffab1 not found: ID does not exist" containerID="1ad3cdeb5e96f7cb8be872ec20f7d9f1f2d09b423c2f6c259ab94856425ffab1" Nov 28 11:45:10 crc kubenswrapper[4923]: I1128 11:45:10.856052 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1ad3cdeb5e96f7cb8be872ec20f7d9f1f2d09b423c2f6c259ab94856425ffab1"} err="failed to get container status \"1ad3cdeb5e96f7cb8be872ec20f7d9f1f2d09b423c2f6c259ab94856425ffab1\": rpc error: code = NotFound desc = could not find container \"1ad3cdeb5e96f7cb8be872ec20f7d9f1f2d09b423c2f6c259ab94856425ffab1\": container with ID starting with 1ad3cdeb5e96f7cb8be872ec20f7d9f1f2d09b423c2f6c259ab94856425ffab1 not found: ID does not exist" Nov 28 11:45:10 crc kubenswrapper[4923]: I1128 11:45:10.856125 4923 scope.go:117] "RemoveContainer" containerID="5eec22dad361382ca16708d919637519aa16e63e4c00c7a0d8ab60234f01c221" Nov 28 11:45:10 crc kubenswrapper[4923]: E1128 11:45:10.856453 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5eec22dad361382ca16708d919637519aa16e63e4c00c7a0d8ab60234f01c221\": container with ID starting with 5eec22dad361382ca16708d919637519aa16e63e4c00c7a0d8ab60234f01c221 not found: ID does not exist" containerID="5eec22dad361382ca16708d919637519aa16e63e4c00c7a0d8ab60234f01c221" Nov 28 11:45:10 crc kubenswrapper[4923]: I1128 11:45:10.856476 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5eec22dad361382ca16708d919637519aa16e63e4c00c7a0d8ab60234f01c221"} err="failed to get container status \"5eec22dad361382ca16708d919637519aa16e63e4c00c7a0d8ab60234f01c221\": rpc error: code = NotFound desc = could not find container \"5eec22dad361382ca16708d919637519aa16e63e4c00c7a0d8ab60234f01c221\": container with ID starting with 5eec22dad361382ca16708d919637519aa16e63e4c00c7a0d8ab60234f01c221 not found: ID does not exist" Nov 28 11:45:10 crc kubenswrapper[4923]: I1128 11:45:10.856505 4923 scope.go:117] "RemoveContainer" containerID="7adc0db8652045c3c43e27636aa2e92ee2b00ac8a038610bf970cfaefc6f2da6" Nov 28 11:45:10 crc kubenswrapper[4923]: E1128 11:45:10.856824 4923 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"7adc0db8652045c3c43e27636aa2e92ee2b00ac8a038610bf970cfaefc6f2da6\": container with ID starting with 7adc0db8652045c3c43e27636aa2e92ee2b00ac8a038610bf970cfaefc6f2da6 not found: ID does not exist" containerID="7adc0db8652045c3c43e27636aa2e92ee2b00ac8a038610bf970cfaefc6f2da6" Nov 28 11:45:10 crc kubenswrapper[4923]: I1128 11:45:10.856896 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7adc0db8652045c3c43e27636aa2e92ee2b00ac8a038610bf970cfaefc6f2da6"} err="failed to get container status \"7adc0db8652045c3c43e27636aa2e92ee2b00ac8a038610bf970cfaefc6f2da6\": rpc error: code = NotFound desc = could not find container \"7adc0db8652045c3c43e27636aa2e92ee2b00ac8a038610bf970cfaefc6f2da6\": container with ID starting with 7adc0db8652045c3c43e27636aa2e92ee2b00ac8a038610bf970cfaefc6f2da6 not found: ID does not exist" Nov 28 11:45:11 crc kubenswrapper[4923]: I1128 11:45:11.181068 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5534c806-78b7-4bc3-8274-9e651e883c9b" path="/var/lib/kubelet/pods/5534c806-78b7-4bc3-8274-9e651e883c9b/volumes" Nov 28 11:45:14 crc kubenswrapper[4923]: I1128 11:45:14.025849 4923 patch_prober.go:28] interesting pod/machine-config-daemon-bwdth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 11:45:14 crc kubenswrapper[4923]: I1128 11:45:14.026167 4923 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 11:45:14 crc kubenswrapper[4923]: I1128 11:45:14.245309 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-5b446d88c5-n5wdm_b4b4fa5c-384b-4de9-8b8f-de1249a7f3c2/cert-manager-controller/0.log" Nov 28 11:45:14 crc kubenswrapper[4923]: I1128 11:45:14.391737 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-7f985d654d-jh94j_2fa1e707-df09-47b5-bfaa-732a6e50d099/cert-manager-cainjector/0.log" Nov 28 11:45:14 crc kubenswrapper[4923]: I1128 11:45:14.420748 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-5655c58dd6-8tb85_999e67cd-bc9f-4886-9127-80740f13d57c/cert-manager-webhook/0.log" Nov 28 11:45:28 crc kubenswrapper[4923]: I1128 11:45:28.368825 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-7fbb5f6569-btvd5_66e9ab76-5eff-4f56-8ff0-aabc981b3b0e/nmstate-console-plugin/0.log" Nov 28 11:45:28 crc kubenswrapper[4923]: I1128 11:45:28.501956 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-8rfxv_860a9fa6-e760-4c45-a051-4fcf8b6c3fc4/nmstate-handler/0.log" Nov 28 11:45:28 crc kubenswrapper[4923]: I1128 11:45:28.575081 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-7f946cbc9-pmjlv_772d8536-f37b-434f-a5d3-d569c2079591/kube-rbac-proxy/0.log" Nov 28 11:45:28 crc kubenswrapper[4923]: I1128 11:45:28.591269 4923 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-nmstate_nmstate-metrics-7f946cbc9-pmjlv_772d8536-f37b-434f-a5d3-d569c2079591/nmstate-metrics/0.log" Nov 28 11:45:28 crc kubenswrapper[4923]: I1128 11:45:28.779581 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-5b5b58f5c8-8ng48_0aa4d573-fe6f-4c0a-930d-99cd8691c86f/nmstate-operator/0.log" Nov 28 11:45:28 crc kubenswrapper[4923]: I1128 11:45:28.849411 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-5f6d4c5ccb-pns7s_b6f09dc3-c7f4-40fe-862e-badef31718a6/nmstate-webhook/0.log" Nov 28 11:45:43 crc kubenswrapper[4923]: I1128 11:45:43.298600 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-f8648f98b-42ftq_9cc1acfe-a432-40e8-859a-a026519e0b19/kube-rbac-proxy/0.log" Nov 28 11:45:43 crc kubenswrapper[4923]: I1128 11:45:43.484143 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-f8648f98b-42ftq_9cc1acfe-a432-40e8-859a-a026519e0b19/controller/0.log" Nov 28 11:45:43 crc kubenswrapper[4923]: I1128 11:45:43.581562 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-ld8vz_fa72f0b5-6e71-4591-b569-5137c1176193/cp-frr-files/0.log" Nov 28 11:45:43 crc kubenswrapper[4923]: I1128 11:45:43.784676 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-ld8vz_fa72f0b5-6e71-4591-b569-5137c1176193/cp-reloader/0.log" Nov 28 11:45:43 crc kubenswrapper[4923]: I1128 11:45:43.805616 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-ld8vz_fa72f0b5-6e71-4591-b569-5137c1176193/cp-metrics/0.log" Nov 28 11:45:43 crc kubenswrapper[4923]: I1128 11:45:43.821350 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-ld8vz_fa72f0b5-6e71-4591-b569-5137c1176193/cp-frr-files/0.log" Nov 28 11:45:43 crc kubenswrapper[4923]: I1128 11:45:43.867753 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-ld8vz_fa72f0b5-6e71-4591-b569-5137c1176193/cp-reloader/0.log" Nov 28 11:45:43 crc kubenswrapper[4923]: I1128 11:45:43.986380 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-ld8vz_fa72f0b5-6e71-4591-b569-5137c1176193/cp-frr-files/0.log" Nov 28 11:45:44 crc kubenswrapper[4923]: I1128 11:45:44.008908 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-ld8vz_fa72f0b5-6e71-4591-b569-5137c1176193/cp-reloader/0.log" Nov 28 11:45:44 crc kubenswrapper[4923]: I1128 11:45:44.025670 4923 patch_prober.go:28] interesting pod/machine-config-daemon-bwdth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 11:45:44 crc kubenswrapper[4923]: I1128 11:45:44.025724 4923 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 11:45:44 crc kubenswrapper[4923]: I1128 11:45:44.073293 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-ld8vz_fa72f0b5-6e71-4591-b569-5137c1176193/cp-metrics/0.log" Nov 28 11:45:44 crc kubenswrapper[4923]: I1128 
11:45:44.077662 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-ld8vz_fa72f0b5-6e71-4591-b569-5137c1176193/cp-metrics/0.log" Nov 28 11:45:44 crc kubenswrapper[4923]: I1128 11:45:44.254834 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-ld8vz_fa72f0b5-6e71-4591-b569-5137c1176193/cp-frr-files/0.log" Nov 28 11:45:44 crc kubenswrapper[4923]: I1128 11:45:44.254835 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-ld8vz_fa72f0b5-6e71-4591-b569-5137c1176193/cp-metrics/0.log" Nov 28 11:45:44 crc kubenswrapper[4923]: I1128 11:45:44.267374 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-ld8vz_fa72f0b5-6e71-4591-b569-5137c1176193/cp-reloader/0.log" Nov 28 11:45:44 crc kubenswrapper[4923]: I1128 11:45:44.314293 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-ld8vz_fa72f0b5-6e71-4591-b569-5137c1176193/controller/0.log" Nov 28 11:45:44 crc kubenswrapper[4923]: I1128 11:45:44.478526 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-ld8vz_fa72f0b5-6e71-4591-b569-5137c1176193/frr-metrics/0.log" Nov 28 11:45:44 crc kubenswrapper[4923]: I1128 11:45:44.481821 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-ld8vz_fa72f0b5-6e71-4591-b569-5137c1176193/kube-rbac-proxy-frr/0.log" Nov 28 11:45:44 crc kubenswrapper[4923]: I1128 11:45:44.532593 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-ld8vz_fa72f0b5-6e71-4591-b569-5137c1176193/kube-rbac-proxy/0.log" Nov 28 11:45:44 crc kubenswrapper[4923]: I1128 11:45:44.749453 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-7fcb986d4-kj8cf_83a5f653-71d2-4df3-bc29-b7bbbcf13765/frr-k8s-webhook-server/0.log" Nov 28 11:45:44 crc kubenswrapper[4923]: I1128 11:45:44.828287 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-ld8vz_fa72f0b5-6e71-4591-b569-5137c1176193/reloader/0.log" Nov 28 11:45:45 crc kubenswrapper[4923]: I1128 11:45:45.113859 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-7fb5d894b8-9ncjh_870847e6-f37f-4f07-9cd0-a479202baf3d/manager/0.log" Nov 28 11:45:45 crc kubenswrapper[4923]: I1128 11:45:45.243923 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-58c4b9449c-b89p9_d410dcc0-ae4b-4376-8973-6691ea6b9939/webhook-server/0.log" Nov 28 11:45:45 crc kubenswrapper[4923]: I1128 11:45:45.250224 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-ld8vz_fa72f0b5-6e71-4591-b569-5137c1176193/frr/0.log" Nov 28 11:45:45 crc kubenswrapper[4923]: I1128 11:45:45.441118 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-tlcl5_f5feb4d3-7f22-4324-a745-2dcd5ec72db9/kube-rbac-proxy/0.log" Nov 28 11:45:45 crc kubenswrapper[4923]: I1128 11:45:45.678921 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-tlcl5_f5feb4d3-7f22-4324-a745-2dcd5ec72db9/speaker/0.log" Nov 28 11:45:59 crc kubenswrapper[4923]: I1128 11:45:59.148335 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftgb69_84d6dae5-7d92-46d7-bc8b-56c31c5900f2/util/0.log" Nov 28 11:45:59 crc kubenswrapper[4923]: I1128 
11:45:59.301662 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftgb69_84d6dae5-7d92-46d7-bc8b-56c31c5900f2/util/0.log" Nov 28 11:45:59 crc kubenswrapper[4923]: I1128 11:45:59.352001 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftgb69_84d6dae5-7d92-46d7-bc8b-56c31c5900f2/pull/0.log" Nov 28 11:45:59 crc kubenswrapper[4923]: I1128 11:45:59.388764 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftgb69_84d6dae5-7d92-46d7-bc8b-56c31c5900f2/pull/0.log" Nov 28 11:45:59 crc kubenswrapper[4923]: I1128 11:45:59.593205 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftgb69_84d6dae5-7d92-46d7-bc8b-56c31c5900f2/extract/0.log" Nov 28 11:45:59 crc kubenswrapper[4923]: I1128 11:45:59.605149 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftgb69_84d6dae5-7d92-46d7-bc8b-56c31c5900f2/pull/0.log" Nov 28 11:45:59 crc kubenswrapper[4923]: I1128 11:45:59.617324 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftgb69_84d6dae5-7d92-46d7-bc8b-56c31c5900f2/util/0.log" Nov 28 11:45:59 crc kubenswrapper[4923]: I1128 11:45:59.766708 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83cphz8_a23f6f20-cf93-4a58-868f-42242f0f1e17/util/0.log" Nov 28 11:45:59 crc kubenswrapper[4923]: I1128 11:45:59.925356 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83cphz8_a23f6f20-cf93-4a58-868f-42242f0f1e17/util/0.log" Nov 28 11:45:59 crc kubenswrapper[4923]: I1128 11:45:59.951376 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83cphz8_a23f6f20-cf93-4a58-868f-42242f0f1e17/pull/0.log" Nov 28 11:45:59 crc kubenswrapper[4923]: I1128 11:45:59.958270 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83cphz8_a23f6f20-cf93-4a58-868f-42242f0f1e17/pull/0.log" Nov 28 11:46:00 crc kubenswrapper[4923]: I1128 11:46:00.132869 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83cphz8_a23f6f20-cf93-4a58-868f-42242f0f1e17/util/0.log" Nov 28 11:46:00 crc kubenswrapper[4923]: I1128 11:46:00.164361 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83cphz8_a23f6f20-cf93-4a58-868f-42242f0f1e17/extract/0.log" Nov 28 11:46:00 crc kubenswrapper[4923]: I1128 11:46:00.195793 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83cphz8_a23f6f20-cf93-4a58-868f-42242f0f1e17/pull/0.log" Nov 28 11:46:00 crc kubenswrapper[4923]: I1128 11:46:00.301653 4923 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_certified-operators-rwgwx_20b869c7-4aad-4ba4-a8c0-c1dbd053515c/extract-utilities/0.log" Nov 28 11:46:00 crc kubenswrapper[4923]: I1128 11:46:00.466770 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-rwgwx_20b869c7-4aad-4ba4-a8c0-c1dbd053515c/extract-utilities/0.log" Nov 28 11:46:00 crc kubenswrapper[4923]: I1128 11:46:00.502821 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-rwgwx_20b869c7-4aad-4ba4-a8c0-c1dbd053515c/extract-content/0.log" Nov 28 11:46:00 crc kubenswrapper[4923]: I1128 11:46:00.509618 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-rwgwx_20b869c7-4aad-4ba4-a8c0-c1dbd053515c/extract-content/0.log" Nov 28 11:46:00 crc kubenswrapper[4923]: I1128 11:46:00.683090 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-rwgwx_20b869c7-4aad-4ba4-a8c0-c1dbd053515c/extract-content/0.log" Nov 28 11:46:00 crc kubenswrapper[4923]: I1128 11:46:00.704243 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-rwgwx_20b869c7-4aad-4ba4-a8c0-c1dbd053515c/extract-utilities/0.log" Nov 28 11:46:00 crc kubenswrapper[4923]: I1128 11:46:00.908716 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-l2szt_4c8bb38f-8751-4623-9098-d7a8bc8423ec/extract-utilities/0.log" Nov 28 11:46:01 crc kubenswrapper[4923]: I1128 11:46:01.004146 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-rwgwx_20b869c7-4aad-4ba4-a8c0-c1dbd053515c/registry-server/0.log" Nov 28 11:46:01 crc kubenswrapper[4923]: I1128 11:46:01.166917 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-l2szt_4c8bb38f-8751-4623-9098-d7a8bc8423ec/extract-content/0.log" Nov 28 11:46:01 crc kubenswrapper[4923]: I1128 11:46:01.178960 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-l2szt_4c8bb38f-8751-4623-9098-d7a8bc8423ec/extract-utilities/0.log" Nov 28 11:46:01 crc kubenswrapper[4923]: I1128 11:46:01.204821 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-l2szt_4c8bb38f-8751-4623-9098-d7a8bc8423ec/extract-content/0.log" Nov 28 11:46:01 crc kubenswrapper[4923]: I1128 11:46:01.354409 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-l2szt_4c8bb38f-8751-4623-9098-d7a8bc8423ec/extract-content/0.log" Nov 28 11:46:01 crc kubenswrapper[4923]: I1128 11:46:01.399087 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-l2szt_4c8bb38f-8751-4623-9098-d7a8bc8423ec/extract-utilities/0.log" Nov 28 11:46:01 crc kubenswrapper[4923]: I1128 11:46:01.652722 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-l2szt_4c8bb38f-8751-4623-9098-d7a8bc8423ec/registry-server/0.log" Nov 28 11:46:01 crc kubenswrapper[4923]: I1128 11:46:01.662483 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-mswcg_82dd6941-bad4-4c84-8589-efc71c77359b/marketplace-operator/0.log" Nov 28 11:46:01 crc kubenswrapper[4923]: I1128 11:46:01.738839 4923 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_redhat-marketplace-sb2nx_c2b69365-6bbb-41c8-afba-9d95f8f80d48/extract-utilities/0.log" Nov 28 11:46:01 crc kubenswrapper[4923]: I1128 11:46:01.913124 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-sb2nx_c2b69365-6bbb-41c8-afba-9d95f8f80d48/extract-utilities/0.log" Nov 28 11:46:01 crc kubenswrapper[4923]: I1128 11:46:01.925572 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-sb2nx_c2b69365-6bbb-41c8-afba-9d95f8f80d48/extract-content/0.log" Nov 28 11:46:01 crc kubenswrapper[4923]: I1128 11:46:01.957390 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-sb2nx_c2b69365-6bbb-41c8-afba-9d95f8f80d48/extract-content/0.log" Nov 28 11:46:02 crc kubenswrapper[4923]: I1128 11:46:02.169511 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-sb2nx_c2b69365-6bbb-41c8-afba-9d95f8f80d48/extract-content/0.log" Nov 28 11:46:02 crc kubenswrapper[4923]: I1128 11:46:02.204242 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-sb2nx_c2b69365-6bbb-41c8-afba-9d95f8f80d48/extract-utilities/0.log" Nov 28 11:46:02 crc kubenswrapper[4923]: I1128 11:46:02.276994 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-sb2nx_c2b69365-6bbb-41c8-afba-9d95f8f80d48/registry-server/0.log" Nov 28 11:46:02 crc kubenswrapper[4923]: I1128 11:46:02.422719 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-l2nw6_4a7b435f-bbd9-4ca6-8d5d-d411879c0b0c/extract-utilities/0.log" Nov 28 11:46:02 crc kubenswrapper[4923]: I1128 11:46:02.577014 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-l2nw6_4a7b435f-bbd9-4ca6-8d5d-d411879c0b0c/extract-utilities/0.log" Nov 28 11:46:02 crc kubenswrapper[4923]: I1128 11:46:02.593737 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-l2nw6_4a7b435f-bbd9-4ca6-8d5d-d411879c0b0c/extract-content/0.log" Nov 28 11:46:02 crc kubenswrapper[4923]: I1128 11:46:02.630968 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-l2nw6_4a7b435f-bbd9-4ca6-8d5d-d411879c0b0c/extract-content/0.log" Nov 28 11:46:02 crc kubenswrapper[4923]: I1128 11:46:02.789086 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-l2nw6_4a7b435f-bbd9-4ca6-8d5d-d411879c0b0c/extract-content/0.log" Nov 28 11:46:02 crc kubenswrapper[4923]: I1128 11:46:02.819858 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-l2nw6_4a7b435f-bbd9-4ca6-8d5d-d411879c0b0c/extract-utilities/0.log" Nov 28 11:46:03 crc kubenswrapper[4923]: I1128 11:46:03.051851 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-l2nw6_4a7b435f-bbd9-4ca6-8d5d-d411879c0b0c/registry-server/0.log" Nov 28 11:46:14 crc kubenswrapper[4923]: I1128 11:46:14.026045 4923 patch_prober.go:28] interesting pod/machine-config-daemon-bwdth container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 11:46:14 crc 
kubenswrapper[4923]: I1128 11:46:14.028500 4923 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 11:46:14 crc kubenswrapper[4923]: I1128 11:46:14.028769 4923 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" Nov 28 11:46:14 crc kubenswrapper[4923]: I1128 11:46:14.030108 4923 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"1c6d35ad730c5c4a031f298c6c53287617cc0cafbb1e8f3d75d9cfe5caf57a49"} pod="openshift-machine-config-operator/machine-config-daemon-bwdth" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 11:46:14 crc kubenswrapper[4923]: I1128 11:46:14.030378 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" containerName="machine-config-daemon" containerID="cri-o://1c6d35ad730c5c4a031f298c6c53287617cc0cafbb1e8f3d75d9cfe5caf57a49" gracePeriod=600 Nov 28 11:46:14 crc kubenswrapper[4923]: E1128 11:46:14.166637 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bwdth_openshift-machine-config-operator(092566f7-fc7d-4897-a1f2-4ecedcd3058e)\"" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" Nov 28 11:46:14 crc kubenswrapper[4923]: I1128 11:46:14.304335 4923 generic.go:334] "Generic (PLEG): container finished" podID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" containerID="1c6d35ad730c5c4a031f298c6c53287617cc0cafbb1e8f3d75d9cfe5caf57a49" exitCode=0 Nov 28 11:46:14 crc kubenswrapper[4923]: I1128 11:46:14.304428 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" event={"ID":"092566f7-fc7d-4897-a1f2-4ecedcd3058e","Type":"ContainerDied","Data":"1c6d35ad730c5c4a031f298c6c53287617cc0cafbb1e8f3d75d9cfe5caf57a49"} Nov 28 11:46:14 crc kubenswrapper[4923]: I1128 11:46:14.304477 4923 scope.go:117] "RemoveContainer" containerID="201e214c150f7a94a55f7f14ac88bad5c3f58ddde6dc1868cf78309362438d26" Nov 28 11:46:14 crc kubenswrapper[4923]: I1128 11:46:14.305366 4923 scope.go:117] "RemoveContainer" containerID="1c6d35ad730c5c4a031f298c6c53287617cc0cafbb1e8f3d75d9cfe5caf57a49" Nov 28 11:46:14 crc kubenswrapper[4923]: E1128 11:46:14.305894 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bwdth_openshift-machine-config-operator(092566f7-fc7d-4897-a1f2-4ecedcd3058e)\"" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" Nov 28 11:46:17 crc kubenswrapper[4923]: I1128 11:46:17.749099 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-k9dql"] Nov 28 11:46:17 crc kubenswrapper[4923]: 
E1128 11:46:17.750780 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5534c806-78b7-4bc3-8274-9e651e883c9b" containerName="extract-content" Nov 28 11:46:17 crc kubenswrapper[4923]: I1128 11:46:17.750849 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="5534c806-78b7-4bc3-8274-9e651e883c9b" containerName="extract-content" Nov 28 11:46:17 crc kubenswrapper[4923]: E1128 11:46:17.750921 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5534c806-78b7-4bc3-8274-9e651e883c9b" containerName="registry-server" Nov 28 11:46:17 crc kubenswrapper[4923]: I1128 11:46:17.750997 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="5534c806-78b7-4bc3-8274-9e651e883c9b" containerName="registry-server" Nov 28 11:46:17 crc kubenswrapper[4923]: E1128 11:46:17.751056 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5534c806-78b7-4bc3-8274-9e651e883c9b" containerName="extract-utilities" Nov 28 11:46:17 crc kubenswrapper[4923]: I1128 11:46:17.751114 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="5534c806-78b7-4bc3-8274-9e651e883c9b" containerName="extract-utilities" Nov 28 11:46:17 crc kubenswrapper[4923]: E1128 11:46:17.751174 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb49ceb3-8c67-4465-90af-fbc481fb2339" containerName="collect-profiles" Nov 28 11:46:17 crc kubenswrapper[4923]: I1128 11:46:17.751225 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb49ceb3-8c67-4465-90af-fbc481fb2339" containerName="collect-profiles" Nov 28 11:46:17 crc kubenswrapper[4923]: I1128 11:46:17.751434 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="cb49ceb3-8c67-4465-90af-fbc481fb2339" containerName="collect-profiles" Nov 28 11:46:17 crc kubenswrapper[4923]: I1128 11:46:17.751561 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="5534c806-78b7-4bc3-8274-9e651e883c9b" containerName="registry-server" Nov 28 11:46:17 crc kubenswrapper[4923]: I1128 11:46:17.752780 4923 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-k9dql" Nov 28 11:46:17 crc kubenswrapper[4923]: I1128 11:46:17.763551 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-k9dql"] Nov 28 11:46:17 crc kubenswrapper[4923]: I1128 11:46:17.802183 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5c580cc0-3dd2-4c2f-a5fc-516529aa4774-catalog-content\") pod \"certified-operators-k9dql\" (UID: \"5c580cc0-3dd2-4c2f-a5fc-516529aa4774\") " pod="openshift-marketplace/certified-operators-k9dql" Nov 28 11:46:17 crc kubenswrapper[4923]: I1128 11:46:17.802575 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5c580cc0-3dd2-4c2f-a5fc-516529aa4774-utilities\") pod \"certified-operators-k9dql\" (UID: \"5c580cc0-3dd2-4c2f-a5fc-516529aa4774\") " pod="openshift-marketplace/certified-operators-k9dql" Nov 28 11:46:17 crc kubenswrapper[4923]: I1128 11:46:17.802740 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-swggg\" (UniqueName: \"kubernetes.io/projected/5c580cc0-3dd2-4c2f-a5fc-516529aa4774-kube-api-access-swggg\") pod \"certified-operators-k9dql\" (UID: \"5c580cc0-3dd2-4c2f-a5fc-516529aa4774\") " pod="openshift-marketplace/certified-operators-k9dql" Nov 28 11:46:17 crc kubenswrapper[4923]: I1128 11:46:17.904789 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-swggg\" (UniqueName: \"kubernetes.io/projected/5c580cc0-3dd2-4c2f-a5fc-516529aa4774-kube-api-access-swggg\") pod \"certified-operators-k9dql\" (UID: \"5c580cc0-3dd2-4c2f-a5fc-516529aa4774\") " pod="openshift-marketplace/certified-operators-k9dql" Nov 28 11:46:17 crc kubenswrapper[4923]: I1128 11:46:17.905137 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5c580cc0-3dd2-4c2f-a5fc-516529aa4774-catalog-content\") pod \"certified-operators-k9dql\" (UID: \"5c580cc0-3dd2-4c2f-a5fc-516529aa4774\") " pod="openshift-marketplace/certified-operators-k9dql" Nov 28 11:46:17 crc kubenswrapper[4923]: I1128 11:46:17.905191 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5c580cc0-3dd2-4c2f-a5fc-516529aa4774-utilities\") pod \"certified-operators-k9dql\" (UID: \"5c580cc0-3dd2-4c2f-a5fc-516529aa4774\") " pod="openshift-marketplace/certified-operators-k9dql" Nov 28 11:46:17 crc kubenswrapper[4923]: I1128 11:46:17.905758 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5c580cc0-3dd2-4c2f-a5fc-516529aa4774-utilities\") pod \"certified-operators-k9dql\" (UID: \"5c580cc0-3dd2-4c2f-a5fc-516529aa4774\") " pod="openshift-marketplace/certified-operators-k9dql" Nov 28 11:46:17 crc kubenswrapper[4923]: I1128 11:46:17.905765 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5c580cc0-3dd2-4c2f-a5fc-516529aa4774-catalog-content\") pod \"certified-operators-k9dql\" (UID: \"5c580cc0-3dd2-4c2f-a5fc-516529aa4774\") " pod="openshift-marketplace/certified-operators-k9dql" Nov 28 11:46:17 crc kubenswrapper[4923]: I1128 11:46:17.935221 4923 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-swggg\" (UniqueName: \"kubernetes.io/projected/5c580cc0-3dd2-4c2f-a5fc-516529aa4774-kube-api-access-swggg\") pod \"certified-operators-k9dql\" (UID: \"5c580cc0-3dd2-4c2f-a5fc-516529aa4774\") " pod="openshift-marketplace/certified-operators-k9dql" Nov 28 11:46:18 crc kubenswrapper[4923]: I1128 11:46:18.076776 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-k9dql" Nov 28 11:46:18 crc kubenswrapper[4923]: I1128 11:46:18.752821 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-k9dql"] Nov 28 11:46:19 crc kubenswrapper[4923]: I1128 11:46:19.354114 4923 generic.go:334] "Generic (PLEG): container finished" podID="5c580cc0-3dd2-4c2f-a5fc-516529aa4774" containerID="a397ca72295c8616e56df98df276d0895163380b319625b2e4d06cdc4e9be34d" exitCode=0 Nov 28 11:46:19 crc kubenswrapper[4923]: I1128 11:46:19.354164 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k9dql" event={"ID":"5c580cc0-3dd2-4c2f-a5fc-516529aa4774","Type":"ContainerDied","Data":"a397ca72295c8616e56df98df276d0895163380b319625b2e4d06cdc4e9be34d"} Nov 28 11:46:19 crc kubenswrapper[4923]: I1128 11:46:19.354194 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k9dql" event={"ID":"5c580cc0-3dd2-4c2f-a5fc-516529aa4774","Type":"ContainerStarted","Data":"4042487253a68c113564a226ca5b9aa28858ed08400467d49934f59ecf957aae"} Nov 28 11:46:19 crc kubenswrapper[4923]: I1128 11:46:19.356183 4923 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 11:46:20 crc kubenswrapper[4923]: I1128 11:46:20.365062 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k9dql" event={"ID":"5c580cc0-3dd2-4c2f-a5fc-516529aa4774","Type":"ContainerStarted","Data":"185759e6eaf19194f307d46953330184dcefa965c82f27e429c2a5ff1873ac77"} Nov 28 11:46:22 crc kubenswrapper[4923]: I1128 11:46:22.380992 4923 generic.go:334] "Generic (PLEG): container finished" podID="5c580cc0-3dd2-4c2f-a5fc-516529aa4774" containerID="185759e6eaf19194f307d46953330184dcefa965c82f27e429c2a5ff1873ac77" exitCode=0 Nov 28 11:46:22 crc kubenswrapper[4923]: I1128 11:46:22.381363 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k9dql" event={"ID":"5c580cc0-3dd2-4c2f-a5fc-516529aa4774","Type":"ContainerDied","Data":"185759e6eaf19194f307d46953330184dcefa965c82f27e429c2a5ff1873ac77"} Nov 28 11:46:23 crc kubenswrapper[4923]: I1128 11:46:23.391511 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k9dql" event={"ID":"5c580cc0-3dd2-4c2f-a5fc-516529aa4774","Type":"ContainerStarted","Data":"76dc6d055cfead1397826aa59589c325a56bdec594868edddef07138142379bf"} Nov 28 11:46:27 crc kubenswrapper[4923]: I1128 11:46:27.168641 4923 scope.go:117] "RemoveContainer" containerID="1c6d35ad730c5c4a031f298c6c53287617cc0cafbb1e8f3d75d9cfe5caf57a49" Nov 28 11:46:27 crc kubenswrapper[4923]: E1128 11:46:27.169537 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bwdth_openshift-machine-config-operator(092566f7-fc7d-4897-a1f2-4ecedcd3058e)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" Nov 28 11:46:28 crc kubenswrapper[4923]: I1128 11:46:28.077745 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-k9dql" Nov 28 11:46:28 crc kubenswrapper[4923]: I1128 11:46:28.078185 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-k9dql" Nov 28 11:46:28 crc kubenswrapper[4923]: I1128 11:46:28.182631 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-k9dql" Nov 28 11:46:28 crc kubenswrapper[4923]: I1128 11:46:28.201797 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-k9dql" podStartSLOduration=7.432931852 podStartE2EDuration="11.201779921s" podCreationTimestamp="2025-11-28 11:46:17 +0000 UTC" firstStartedPulling="2025-11-28 11:46:19.35588215 +0000 UTC m=+2258.484566360" lastFinishedPulling="2025-11-28 11:46:23.124730209 +0000 UTC m=+2262.253414429" observedRunningTime="2025-11-28 11:46:23.420855077 +0000 UTC m=+2262.549539287" watchObservedRunningTime="2025-11-28 11:46:28.201779921 +0000 UTC m=+2267.330464131" Nov 28 11:46:28 crc kubenswrapper[4923]: I1128 11:46:28.481920 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-k9dql" Nov 28 11:46:28 crc kubenswrapper[4923]: I1128 11:46:28.532107 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-k9dql"] Nov 28 11:46:30 crc kubenswrapper[4923]: I1128 11:46:30.452361 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-k9dql" podUID="5c580cc0-3dd2-4c2f-a5fc-516529aa4774" containerName="registry-server" containerID="cri-o://76dc6d055cfead1397826aa59589c325a56bdec594868edddef07138142379bf" gracePeriod=2 Nov 28 11:46:31 crc kubenswrapper[4923]: I1128 11:46:31.008281 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-k9dql" Nov 28 11:46:31 crc kubenswrapper[4923]: I1128 11:46:31.051998 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5c580cc0-3dd2-4c2f-a5fc-516529aa4774-catalog-content\") pod \"5c580cc0-3dd2-4c2f-a5fc-516529aa4774\" (UID: \"5c580cc0-3dd2-4c2f-a5fc-516529aa4774\") " Nov 28 11:46:31 crc kubenswrapper[4923]: I1128 11:46:31.052151 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-swggg\" (UniqueName: \"kubernetes.io/projected/5c580cc0-3dd2-4c2f-a5fc-516529aa4774-kube-api-access-swggg\") pod \"5c580cc0-3dd2-4c2f-a5fc-516529aa4774\" (UID: \"5c580cc0-3dd2-4c2f-a5fc-516529aa4774\") " Nov 28 11:46:31 crc kubenswrapper[4923]: I1128 11:46:31.052203 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5c580cc0-3dd2-4c2f-a5fc-516529aa4774-utilities\") pod \"5c580cc0-3dd2-4c2f-a5fc-516529aa4774\" (UID: \"5c580cc0-3dd2-4c2f-a5fc-516529aa4774\") " Nov 28 11:46:31 crc kubenswrapper[4923]: I1128 11:46:31.053228 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5c580cc0-3dd2-4c2f-a5fc-516529aa4774-utilities" (OuterVolumeSpecName: "utilities") pod "5c580cc0-3dd2-4c2f-a5fc-516529aa4774" (UID: "5c580cc0-3dd2-4c2f-a5fc-516529aa4774"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:46:31 crc kubenswrapper[4923]: I1128 11:46:31.089835 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5c580cc0-3dd2-4c2f-a5fc-516529aa4774-kube-api-access-swggg" (OuterVolumeSpecName: "kube-api-access-swggg") pod "5c580cc0-3dd2-4c2f-a5fc-516529aa4774" (UID: "5c580cc0-3dd2-4c2f-a5fc-516529aa4774"). InnerVolumeSpecName "kube-api-access-swggg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:46:31 crc kubenswrapper[4923]: I1128 11:46:31.138343 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5c580cc0-3dd2-4c2f-a5fc-516529aa4774-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5c580cc0-3dd2-4c2f-a5fc-516529aa4774" (UID: "5c580cc0-3dd2-4c2f-a5fc-516529aa4774"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:46:31 crc kubenswrapper[4923]: I1128 11:46:31.154061 4923 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5c580cc0-3dd2-4c2f-a5fc-516529aa4774-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 11:46:31 crc kubenswrapper[4923]: I1128 11:46:31.154264 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-swggg\" (UniqueName: \"kubernetes.io/projected/5c580cc0-3dd2-4c2f-a5fc-516529aa4774-kube-api-access-swggg\") on node \"crc\" DevicePath \"\"" Nov 28 11:46:31 crc kubenswrapper[4923]: I1128 11:46:31.154323 4923 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5c580cc0-3dd2-4c2f-a5fc-516529aa4774-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 11:46:31 crc kubenswrapper[4923]: I1128 11:46:31.468968 4923 generic.go:334] "Generic (PLEG): container finished" podID="5c580cc0-3dd2-4c2f-a5fc-516529aa4774" containerID="76dc6d055cfead1397826aa59589c325a56bdec594868edddef07138142379bf" exitCode=0 Nov 28 11:46:31 crc kubenswrapper[4923]: I1128 11:46:31.469246 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k9dql" event={"ID":"5c580cc0-3dd2-4c2f-a5fc-516529aa4774","Type":"ContainerDied","Data":"76dc6d055cfead1397826aa59589c325a56bdec594868edddef07138142379bf"} Nov 28 11:46:31 crc kubenswrapper[4923]: I1128 11:46:31.469273 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k9dql" event={"ID":"5c580cc0-3dd2-4c2f-a5fc-516529aa4774","Type":"ContainerDied","Data":"4042487253a68c113564a226ca5b9aa28858ed08400467d49934f59ecf957aae"} Nov 28 11:46:31 crc kubenswrapper[4923]: I1128 11:46:31.469305 4923 scope.go:117] "RemoveContainer" containerID="76dc6d055cfead1397826aa59589c325a56bdec594868edddef07138142379bf" Nov 28 11:46:31 crc kubenswrapper[4923]: I1128 11:46:31.469436 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-k9dql" Nov 28 11:46:31 crc kubenswrapper[4923]: I1128 11:46:31.490908 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-k9dql"] Nov 28 11:46:31 crc kubenswrapper[4923]: I1128 11:46:31.500438 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-k9dql"] Nov 28 11:46:31 crc kubenswrapper[4923]: I1128 11:46:31.527966 4923 scope.go:117] "RemoveContainer" containerID="185759e6eaf19194f307d46953330184dcefa965c82f27e429c2a5ff1873ac77" Nov 28 11:46:31 crc kubenswrapper[4923]: I1128 11:46:31.545004 4923 scope.go:117] "RemoveContainer" containerID="a397ca72295c8616e56df98df276d0895163380b319625b2e4d06cdc4e9be34d" Nov 28 11:46:31 crc kubenswrapper[4923]: I1128 11:46:31.581690 4923 scope.go:117] "RemoveContainer" containerID="76dc6d055cfead1397826aa59589c325a56bdec594868edddef07138142379bf" Nov 28 11:46:31 crc kubenswrapper[4923]: E1128 11:46:31.582413 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"76dc6d055cfead1397826aa59589c325a56bdec594868edddef07138142379bf\": container with ID starting with 76dc6d055cfead1397826aa59589c325a56bdec594868edddef07138142379bf not found: ID does not exist" containerID="76dc6d055cfead1397826aa59589c325a56bdec594868edddef07138142379bf" Nov 28 11:46:31 crc kubenswrapper[4923]: I1128 11:46:31.582452 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"76dc6d055cfead1397826aa59589c325a56bdec594868edddef07138142379bf"} err="failed to get container status \"76dc6d055cfead1397826aa59589c325a56bdec594868edddef07138142379bf\": rpc error: code = NotFound desc = could not find container \"76dc6d055cfead1397826aa59589c325a56bdec594868edddef07138142379bf\": container with ID starting with 76dc6d055cfead1397826aa59589c325a56bdec594868edddef07138142379bf not found: ID does not exist" Nov 28 11:46:31 crc kubenswrapper[4923]: I1128 11:46:31.582484 4923 scope.go:117] "RemoveContainer" containerID="185759e6eaf19194f307d46953330184dcefa965c82f27e429c2a5ff1873ac77" Nov 28 11:46:31 crc kubenswrapper[4923]: E1128 11:46:31.582790 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"185759e6eaf19194f307d46953330184dcefa965c82f27e429c2a5ff1873ac77\": container with ID starting with 185759e6eaf19194f307d46953330184dcefa965c82f27e429c2a5ff1873ac77 not found: ID does not exist" containerID="185759e6eaf19194f307d46953330184dcefa965c82f27e429c2a5ff1873ac77" Nov 28 11:46:31 crc kubenswrapper[4923]: I1128 11:46:31.582837 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"185759e6eaf19194f307d46953330184dcefa965c82f27e429c2a5ff1873ac77"} err="failed to get container status \"185759e6eaf19194f307d46953330184dcefa965c82f27e429c2a5ff1873ac77\": rpc error: code = NotFound desc = could not find container \"185759e6eaf19194f307d46953330184dcefa965c82f27e429c2a5ff1873ac77\": container with ID starting with 185759e6eaf19194f307d46953330184dcefa965c82f27e429c2a5ff1873ac77 not found: ID does not exist" Nov 28 11:46:31 crc kubenswrapper[4923]: I1128 11:46:31.582854 4923 scope.go:117] "RemoveContainer" containerID="a397ca72295c8616e56df98df276d0895163380b319625b2e4d06cdc4e9be34d" Nov 28 11:46:31 crc kubenswrapper[4923]: E1128 11:46:31.583183 4923 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"a397ca72295c8616e56df98df276d0895163380b319625b2e4d06cdc4e9be34d\": container with ID starting with a397ca72295c8616e56df98df276d0895163380b319625b2e4d06cdc4e9be34d not found: ID does not exist" containerID="a397ca72295c8616e56df98df276d0895163380b319625b2e4d06cdc4e9be34d" Nov 28 11:46:31 crc kubenswrapper[4923]: I1128 11:46:31.583242 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a397ca72295c8616e56df98df276d0895163380b319625b2e4d06cdc4e9be34d"} err="failed to get container status \"a397ca72295c8616e56df98df276d0895163380b319625b2e4d06cdc4e9be34d\": rpc error: code = NotFound desc = could not find container \"a397ca72295c8616e56df98df276d0895163380b319625b2e4d06cdc4e9be34d\": container with ID starting with a397ca72295c8616e56df98df276d0895163380b319625b2e4d06cdc4e9be34d not found: ID does not exist" Nov 28 11:46:33 crc kubenswrapper[4923]: I1128 11:46:33.178192 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5c580cc0-3dd2-4c2f-a5fc-516529aa4774" path="/var/lib/kubelet/pods/5c580cc0-3dd2-4c2f-a5fc-516529aa4774/volumes" Nov 28 11:46:40 crc kubenswrapper[4923]: I1128 11:46:40.169152 4923 scope.go:117] "RemoveContainer" containerID="1c6d35ad730c5c4a031f298c6c53287617cc0cafbb1e8f3d75d9cfe5caf57a49" Nov 28 11:46:40 crc kubenswrapper[4923]: E1128 11:46:40.170195 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bwdth_openshift-machine-config-operator(092566f7-fc7d-4897-a1f2-4ecedcd3058e)\"" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" Nov 28 11:46:55 crc kubenswrapper[4923]: I1128 11:46:55.169467 4923 scope.go:117] "RemoveContainer" containerID="1c6d35ad730c5c4a031f298c6c53287617cc0cafbb1e8f3d75d9cfe5caf57a49" Nov 28 11:46:55 crc kubenswrapper[4923]: E1128 11:46:55.170413 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bwdth_openshift-machine-config-operator(092566f7-fc7d-4897-a1f2-4ecedcd3058e)\"" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" Nov 28 11:47:09 crc kubenswrapper[4923]: I1128 11:47:09.169583 4923 scope.go:117] "RemoveContainer" containerID="1c6d35ad730c5c4a031f298c6c53287617cc0cafbb1e8f3d75d9cfe5caf57a49" Nov 28 11:47:09 crc kubenswrapper[4923]: E1128 11:47:09.170694 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bwdth_openshift-machine-config-operator(092566f7-fc7d-4897-a1f2-4ecedcd3058e)\"" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" Nov 28 11:47:23 crc kubenswrapper[4923]: I1128 11:47:23.171089 4923 scope.go:117] "RemoveContainer" containerID="1c6d35ad730c5c4a031f298c6c53287617cc0cafbb1e8f3d75d9cfe5caf57a49" Nov 28 11:47:23 crc kubenswrapper[4923]: E1128 11:47:23.171841 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bwdth_openshift-machine-config-operator(092566f7-fc7d-4897-a1f2-4ecedcd3058e)\"" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" Nov 28 11:47:34 crc kubenswrapper[4923]: I1128 11:47:34.169248 4923 scope.go:117] "RemoveContainer" containerID="1c6d35ad730c5c4a031f298c6c53287617cc0cafbb1e8f3d75d9cfe5caf57a49" Nov 28 11:47:34 crc kubenswrapper[4923]: E1128 11:47:34.170584 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bwdth_openshift-machine-config-operator(092566f7-fc7d-4897-a1f2-4ecedcd3058e)\"" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" Nov 28 11:47:46 crc kubenswrapper[4923]: I1128 11:47:46.168656 4923 scope.go:117] "RemoveContainer" containerID="1c6d35ad730c5c4a031f298c6c53287617cc0cafbb1e8f3d75d9cfe5caf57a49" Nov 28 11:47:46 crc kubenswrapper[4923]: E1128 11:47:46.170537 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bwdth_openshift-machine-config-operator(092566f7-fc7d-4897-a1f2-4ecedcd3058e)\"" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" Nov 28 11:47:47 crc kubenswrapper[4923]: I1128 11:47:47.280855 4923 generic.go:334] "Generic (PLEG): container finished" podID="dadea3e7-5e33-4e45-a825-ea661d9c5f81" containerID="3c9ef6c67fd0302b0c7b56293f83f828cd181aeb1421582653bd50ed2787e563" exitCode=0 Nov 28 11:47:47 crc kubenswrapper[4923]: I1128 11:47:47.280969 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-zr9xk/must-gather-fztwk" event={"ID":"dadea3e7-5e33-4e45-a825-ea661d9c5f81","Type":"ContainerDied","Data":"3c9ef6c67fd0302b0c7b56293f83f828cd181aeb1421582653bd50ed2787e563"} Nov 28 11:47:47 crc kubenswrapper[4923]: I1128 11:47:47.282010 4923 scope.go:117] "RemoveContainer" containerID="3c9ef6c67fd0302b0c7b56293f83f828cd181aeb1421582653bd50ed2787e563" Nov 28 11:47:47 crc kubenswrapper[4923]: I1128 11:47:47.519447 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-zr9xk_must-gather-fztwk_dadea3e7-5e33-4e45-a825-ea661d9c5f81/gather/0.log" Nov 28 11:47:55 crc kubenswrapper[4923]: I1128 11:47:55.397664 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-zr9xk/must-gather-fztwk"] Nov 28 11:47:55 crc kubenswrapper[4923]: I1128 11:47:55.398404 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-zr9xk/must-gather-fztwk" podUID="dadea3e7-5e33-4e45-a825-ea661d9c5f81" containerName="copy" containerID="cri-o://5893b5d45d617f66677d4c64363d1726b36f265567e37e6ddfd0fdf70d9ceb6a" gracePeriod=2 Nov 28 11:47:55 crc kubenswrapper[4923]: I1128 11:47:55.414630 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-zr9xk/must-gather-fztwk"] Nov 28 11:47:55 crc kubenswrapper[4923]: I1128 11:47:55.800827 4923 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-must-gather-zr9xk_must-gather-fztwk_dadea3e7-5e33-4e45-a825-ea661d9c5f81/copy/0.log" Nov 28 11:47:55 crc kubenswrapper[4923]: I1128 11:47:55.801207 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-zr9xk/must-gather-fztwk" Nov 28 11:47:55 crc kubenswrapper[4923]: I1128 11:47:55.968036 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ggvc6\" (UniqueName: \"kubernetes.io/projected/dadea3e7-5e33-4e45-a825-ea661d9c5f81-kube-api-access-ggvc6\") pod \"dadea3e7-5e33-4e45-a825-ea661d9c5f81\" (UID: \"dadea3e7-5e33-4e45-a825-ea661d9c5f81\") " Nov 28 11:47:55 crc kubenswrapper[4923]: I1128 11:47:55.968507 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/dadea3e7-5e33-4e45-a825-ea661d9c5f81-must-gather-output\") pod \"dadea3e7-5e33-4e45-a825-ea661d9c5f81\" (UID: \"dadea3e7-5e33-4e45-a825-ea661d9c5f81\") " Nov 28 11:47:55 crc kubenswrapper[4923]: I1128 11:47:55.975895 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dadea3e7-5e33-4e45-a825-ea661d9c5f81-kube-api-access-ggvc6" (OuterVolumeSpecName: "kube-api-access-ggvc6") pod "dadea3e7-5e33-4e45-a825-ea661d9c5f81" (UID: "dadea3e7-5e33-4e45-a825-ea661d9c5f81"). InnerVolumeSpecName "kube-api-access-ggvc6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:47:56 crc kubenswrapper[4923]: I1128 11:47:56.070746 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ggvc6\" (UniqueName: \"kubernetes.io/projected/dadea3e7-5e33-4e45-a825-ea661d9c5f81-kube-api-access-ggvc6\") on node \"crc\" DevicePath \"\"" Nov 28 11:47:56 crc kubenswrapper[4923]: I1128 11:47:56.117091 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dadea3e7-5e33-4e45-a825-ea661d9c5f81-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "dadea3e7-5e33-4e45-a825-ea661d9c5f81" (UID: "dadea3e7-5e33-4e45-a825-ea661d9c5f81"). InnerVolumeSpecName "must-gather-output". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:47:56 crc kubenswrapper[4923]: I1128 11:47:56.172049 4923 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/dadea3e7-5e33-4e45-a825-ea661d9c5f81-must-gather-output\") on node \"crc\" DevicePath \"\"" Nov 28 11:47:56 crc kubenswrapper[4923]: I1128 11:47:56.369102 4923 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-zr9xk_must-gather-fztwk_dadea3e7-5e33-4e45-a825-ea661d9c5f81/copy/0.log" Nov 28 11:47:56 crc kubenswrapper[4923]: I1128 11:47:56.369608 4923 generic.go:334] "Generic (PLEG): container finished" podID="dadea3e7-5e33-4e45-a825-ea661d9c5f81" containerID="5893b5d45d617f66677d4c64363d1726b36f265567e37e6ddfd0fdf70d9ceb6a" exitCode=143 Nov 28 11:47:56 crc kubenswrapper[4923]: I1128 11:47:56.369703 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-zr9xk/must-gather-fztwk" Nov 28 11:47:56 crc kubenswrapper[4923]: I1128 11:47:56.369706 4923 scope.go:117] "RemoveContainer" containerID="5893b5d45d617f66677d4c64363d1726b36f265567e37e6ddfd0fdf70d9ceb6a" Nov 28 11:47:56 crc kubenswrapper[4923]: I1128 11:47:56.432793 4923 scope.go:117] "RemoveContainer" containerID="3c9ef6c67fd0302b0c7b56293f83f828cd181aeb1421582653bd50ed2787e563" Nov 28 11:47:56 crc kubenswrapper[4923]: I1128 11:47:56.470606 4923 scope.go:117] "RemoveContainer" containerID="5893b5d45d617f66677d4c64363d1726b36f265567e37e6ddfd0fdf70d9ceb6a" Nov 28 11:47:56 crc kubenswrapper[4923]: E1128 11:47:56.471082 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5893b5d45d617f66677d4c64363d1726b36f265567e37e6ddfd0fdf70d9ceb6a\": container with ID starting with 5893b5d45d617f66677d4c64363d1726b36f265567e37e6ddfd0fdf70d9ceb6a not found: ID does not exist" containerID="5893b5d45d617f66677d4c64363d1726b36f265567e37e6ddfd0fdf70d9ceb6a" Nov 28 11:47:56 crc kubenswrapper[4923]: I1128 11:47:56.471140 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5893b5d45d617f66677d4c64363d1726b36f265567e37e6ddfd0fdf70d9ceb6a"} err="failed to get container status \"5893b5d45d617f66677d4c64363d1726b36f265567e37e6ddfd0fdf70d9ceb6a\": rpc error: code = NotFound desc = could not find container \"5893b5d45d617f66677d4c64363d1726b36f265567e37e6ddfd0fdf70d9ceb6a\": container with ID starting with 5893b5d45d617f66677d4c64363d1726b36f265567e37e6ddfd0fdf70d9ceb6a not found: ID does not exist" Nov 28 11:47:56 crc kubenswrapper[4923]: I1128 11:47:56.471174 4923 scope.go:117] "RemoveContainer" containerID="3c9ef6c67fd0302b0c7b56293f83f828cd181aeb1421582653bd50ed2787e563" Nov 28 11:47:56 crc kubenswrapper[4923]: E1128 11:47:56.471489 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3c9ef6c67fd0302b0c7b56293f83f828cd181aeb1421582653bd50ed2787e563\": container with ID starting with 3c9ef6c67fd0302b0c7b56293f83f828cd181aeb1421582653bd50ed2787e563 not found: ID does not exist" containerID="3c9ef6c67fd0302b0c7b56293f83f828cd181aeb1421582653bd50ed2787e563" Nov 28 11:47:56 crc kubenswrapper[4923]: I1128 11:47:56.471510 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3c9ef6c67fd0302b0c7b56293f83f828cd181aeb1421582653bd50ed2787e563"} err="failed to get container status \"3c9ef6c67fd0302b0c7b56293f83f828cd181aeb1421582653bd50ed2787e563\": rpc error: code = NotFound desc = could not find container \"3c9ef6c67fd0302b0c7b56293f83f828cd181aeb1421582653bd50ed2787e563\": container with ID starting with 3c9ef6c67fd0302b0c7b56293f83f828cd181aeb1421582653bd50ed2787e563 not found: ID does not exist" Nov 28 11:47:57 crc kubenswrapper[4923]: I1128 11:47:57.177514 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dadea3e7-5e33-4e45-a825-ea661d9c5f81" path="/var/lib/kubelet/pods/dadea3e7-5e33-4e45-a825-ea661d9c5f81/volumes" Nov 28 11:47:58 crc kubenswrapper[4923]: I1128 11:47:58.168398 4923 scope.go:117] "RemoveContainer" containerID="1c6d35ad730c5c4a031f298c6c53287617cc0cafbb1e8f3d75d9cfe5caf57a49" Nov 28 11:47:58 crc kubenswrapper[4923]: E1128 11:47:58.168764 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 
5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bwdth_openshift-machine-config-operator(092566f7-fc7d-4897-a1f2-4ecedcd3058e)\"" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" Nov 28 11:48:09 crc kubenswrapper[4923]: I1128 11:48:09.173180 4923 scope.go:117] "RemoveContainer" containerID="1c6d35ad730c5c4a031f298c6c53287617cc0cafbb1e8f3d75d9cfe5caf57a49" Nov 28 11:48:09 crc kubenswrapper[4923]: E1128 11:48:09.174471 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bwdth_openshift-machine-config-operator(092566f7-fc7d-4897-a1f2-4ecedcd3058e)\"" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" Nov 28 11:48:23 crc kubenswrapper[4923]: I1128 11:48:23.169776 4923 scope.go:117] "RemoveContainer" containerID="1c6d35ad730c5c4a031f298c6c53287617cc0cafbb1e8f3d75d9cfe5caf57a49" Nov 28 11:48:23 crc kubenswrapper[4923]: E1128 11:48:23.170742 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bwdth_openshift-machine-config-operator(092566f7-fc7d-4897-a1f2-4ecedcd3058e)\"" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" Nov 28 11:48:35 crc kubenswrapper[4923]: I1128 11:48:35.168372 4923 scope.go:117] "RemoveContainer" containerID="1c6d35ad730c5c4a031f298c6c53287617cc0cafbb1e8f3d75d9cfe5caf57a49" Nov 28 11:48:35 crc kubenswrapper[4923]: E1128 11:48:35.170295 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bwdth_openshift-machine-config-operator(092566f7-fc7d-4897-a1f2-4ecedcd3058e)\"" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" Nov 28 11:48:50 crc kubenswrapper[4923]: I1128 11:48:50.169762 4923 scope.go:117] "RemoveContainer" containerID="1c6d35ad730c5c4a031f298c6c53287617cc0cafbb1e8f3d75d9cfe5caf57a49" Nov 28 11:48:50 crc kubenswrapper[4923]: E1128 11:48:50.170331 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bwdth_openshift-machine-config-operator(092566f7-fc7d-4897-a1f2-4ecedcd3058e)\"" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" Nov 28 11:49:03 crc kubenswrapper[4923]: I1128 11:49:03.169433 4923 scope.go:117] "RemoveContainer" containerID="1c6d35ad730c5c4a031f298c6c53287617cc0cafbb1e8f3d75d9cfe5caf57a49" Nov 28 11:49:03 crc kubenswrapper[4923]: E1128 11:49:03.170721 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bwdth_openshift-machine-config-operator(092566f7-fc7d-4897-a1f2-4ecedcd3058e)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" Nov 28 11:49:08 crc kubenswrapper[4923]: I1128 11:49:08.448073 4923 scope.go:117] "RemoveContainer" containerID="dd28ef2f6b1c1aea196b35441b0b3d842836059357d2b89923c6c27cb3c78a77" Nov 28 11:49:18 crc kubenswrapper[4923]: I1128 11:49:18.169158 4923 scope.go:117] "RemoveContainer" containerID="1c6d35ad730c5c4a031f298c6c53287617cc0cafbb1e8f3d75d9cfe5caf57a49" Nov 28 11:49:18 crc kubenswrapper[4923]: E1128 11:49:18.169976 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bwdth_openshift-machine-config-operator(092566f7-fc7d-4897-a1f2-4ecedcd3058e)\"" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" Nov 28 11:49:31 crc kubenswrapper[4923]: I1128 11:49:31.174165 4923 scope.go:117] "RemoveContainer" containerID="1c6d35ad730c5c4a031f298c6c53287617cc0cafbb1e8f3d75d9cfe5caf57a49" Nov 28 11:49:31 crc kubenswrapper[4923]: E1128 11:49:31.174947 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bwdth_openshift-machine-config-operator(092566f7-fc7d-4897-a1f2-4ecedcd3058e)\"" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" Nov 28 11:49:44 crc kubenswrapper[4923]: I1128 11:49:44.169284 4923 scope.go:117] "RemoveContainer" containerID="1c6d35ad730c5c4a031f298c6c53287617cc0cafbb1e8f3d75d9cfe5caf57a49" Nov 28 11:49:44 crc kubenswrapper[4923]: E1128 11:49:44.170011 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bwdth_openshift-machine-config-operator(092566f7-fc7d-4897-a1f2-4ecedcd3058e)\"" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" Nov 28 11:49:57 crc kubenswrapper[4923]: I1128 11:49:57.169553 4923 scope.go:117] "RemoveContainer" containerID="1c6d35ad730c5c4a031f298c6c53287617cc0cafbb1e8f3d75d9cfe5caf57a49" Nov 28 11:49:57 crc kubenswrapper[4923]: E1128 11:49:57.170732 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bwdth_openshift-machine-config-operator(092566f7-fc7d-4897-a1f2-4ecedcd3058e)\"" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" Nov 28 11:50:08 crc kubenswrapper[4923]: I1128 11:50:08.169757 4923 scope.go:117] "RemoveContainer" containerID="1c6d35ad730c5c4a031f298c6c53287617cc0cafbb1e8f3d75d9cfe5caf57a49" Nov 28 11:50:08 crc kubenswrapper[4923]: E1128 11:50:08.170818 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bwdth_openshift-machine-config-operator(092566f7-fc7d-4897-a1f2-4ecedcd3058e)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" Nov 28 11:50:20 crc kubenswrapper[4923]: I1128 11:50:20.169068 4923 scope.go:117] "RemoveContainer" containerID="1c6d35ad730c5c4a031f298c6c53287617cc0cafbb1e8f3d75d9cfe5caf57a49" Nov 28 11:50:20 crc kubenswrapper[4923]: E1128 11:50:20.171731 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bwdth_openshift-machine-config-operator(092566f7-fc7d-4897-a1f2-4ecedcd3058e)\"" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" Nov 28 11:50:32 crc kubenswrapper[4923]: I1128 11:50:32.171116 4923 scope.go:117] "RemoveContainer" containerID="1c6d35ad730c5c4a031f298c6c53287617cc0cafbb1e8f3d75d9cfe5caf57a49" Nov 28 11:50:32 crc kubenswrapper[4923]: E1128 11:50:32.172084 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bwdth_openshift-machine-config-operator(092566f7-fc7d-4897-a1f2-4ecedcd3058e)\"" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" Nov 28 11:50:37 crc kubenswrapper[4923]: I1128 11:50:37.654287 4923 patch_prober.go:28] interesting pod/authentication-operator-69f744f599-cbtlt container/authentication-operator namespace/openshift-authentication-operator: Liveness probe status=failure output="Get \"https://10.217.0.12:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 28 11:50:37 crc kubenswrapper[4923]: I1128 11:50:37.654952 4923 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-authentication-operator/authentication-operator-69f744f599-cbtlt" podUID="4926f4a2-0ee6-444b-a113-f6ee1d162d72" containerName="authentication-operator" probeResult="failure" output="Get \"https://10.217.0.12:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 28 11:50:46 crc kubenswrapper[4923]: I1128 11:50:46.168989 4923 scope.go:117] "RemoveContainer" containerID="1c6d35ad730c5c4a031f298c6c53287617cc0cafbb1e8f3d75d9cfe5caf57a49" Nov 28 11:50:46 crc kubenswrapper[4923]: E1128 11:50:46.169738 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bwdth_openshift-machine-config-operator(092566f7-fc7d-4897-a1f2-4ecedcd3058e)\"" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" Nov 28 11:51:01 crc kubenswrapper[4923]: I1128 11:51:01.177847 4923 scope.go:117] "RemoveContainer" containerID="1c6d35ad730c5c4a031f298c6c53287617cc0cafbb1e8f3d75d9cfe5caf57a49" Nov 28 11:51:01 crc kubenswrapper[4923]: E1128 11:51:01.178912 4923 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-bwdth_openshift-machine-config-operator(092566f7-fc7d-4897-a1f2-4ecedcd3058e)\"" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" podUID="092566f7-fc7d-4897-a1f2-4ecedcd3058e" Nov 28 11:51:15 crc kubenswrapper[4923]: I1128 11:51:15.168994 4923 scope.go:117] "RemoveContainer" containerID="1c6d35ad730c5c4a031f298c6c53287617cc0cafbb1e8f3d75d9cfe5caf57a49" Nov 28 11:51:15 crc kubenswrapper[4923]: I1128 11:51:15.454443 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-bwdth" event={"ID":"092566f7-fc7d-4897-a1f2-4ecedcd3058e","Type":"ContainerStarted","Data":"0ab89fe4c28b5d2372547e4f10a4bacee0b6019b8245b7a0657266f033e14172"} Nov 28 11:51:55 crc kubenswrapper[4923]: I1128 11:51:55.549747 4923 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-gmnlm"] Nov 28 11:51:55 crc kubenswrapper[4923]: E1128 11:51:55.551096 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dadea3e7-5e33-4e45-a825-ea661d9c5f81" containerName="copy" Nov 28 11:51:55 crc kubenswrapper[4923]: I1128 11:51:55.551122 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="dadea3e7-5e33-4e45-a825-ea661d9c5f81" containerName="copy" Nov 28 11:51:55 crc kubenswrapper[4923]: E1128 11:51:55.551146 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dadea3e7-5e33-4e45-a825-ea661d9c5f81" containerName="gather" Nov 28 11:51:55 crc kubenswrapper[4923]: I1128 11:51:55.551158 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="dadea3e7-5e33-4e45-a825-ea661d9c5f81" containerName="gather" Nov 28 11:51:55 crc kubenswrapper[4923]: E1128 11:51:55.551182 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5c580cc0-3dd2-4c2f-a5fc-516529aa4774" containerName="extract-content" Nov 28 11:51:55 crc kubenswrapper[4923]: I1128 11:51:55.551195 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="5c580cc0-3dd2-4c2f-a5fc-516529aa4774" containerName="extract-content" Nov 28 11:51:55 crc kubenswrapper[4923]: E1128 11:51:55.551225 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5c580cc0-3dd2-4c2f-a5fc-516529aa4774" containerName="registry-server" Nov 28 11:51:55 crc kubenswrapper[4923]: I1128 11:51:55.551237 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="5c580cc0-3dd2-4c2f-a5fc-516529aa4774" containerName="registry-server" Nov 28 11:51:55 crc kubenswrapper[4923]: E1128 11:51:55.551262 4923 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5c580cc0-3dd2-4c2f-a5fc-516529aa4774" containerName="extract-utilities" Nov 28 11:51:55 crc kubenswrapper[4923]: I1128 11:51:55.551274 4923 state_mem.go:107] "Deleted CPUSet assignment" podUID="5c580cc0-3dd2-4c2f-a5fc-516529aa4774" containerName="extract-utilities" Nov 28 11:51:55 crc kubenswrapper[4923]: I1128 11:51:55.551590 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="dadea3e7-5e33-4e45-a825-ea661d9c5f81" containerName="gather" Nov 28 11:51:55 crc kubenswrapper[4923]: I1128 11:51:55.551612 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="dadea3e7-5e33-4e45-a825-ea661d9c5f81" containerName="copy" Nov 28 11:51:55 crc kubenswrapper[4923]: I1128 11:51:55.551626 4923 memory_manager.go:354] "RemoveStaleState removing state" podUID="5c580cc0-3dd2-4c2f-a5fc-516529aa4774" containerName="registry-server" Nov 28 11:51:55 crc kubenswrapper[4923]: I1128 11:51:55.554302 4923 util.go:30] "No sandbox for pod can be 
found. Need to start a new one" pod="openshift-marketplace/redhat-operators-gmnlm" Nov 28 11:51:55 crc kubenswrapper[4923]: I1128 11:51:55.567598 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-gmnlm"] Nov 28 11:51:55 crc kubenswrapper[4923]: I1128 11:51:55.682383 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fd2efc9b-3725-4551-93c3-ad5b3186ce9a-utilities\") pod \"redhat-operators-gmnlm\" (UID: \"fd2efc9b-3725-4551-93c3-ad5b3186ce9a\") " pod="openshift-marketplace/redhat-operators-gmnlm" Nov 28 11:51:55 crc kubenswrapper[4923]: I1128 11:51:55.682684 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fd2efc9b-3725-4551-93c3-ad5b3186ce9a-catalog-content\") pod \"redhat-operators-gmnlm\" (UID: \"fd2efc9b-3725-4551-93c3-ad5b3186ce9a\") " pod="openshift-marketplace/redhat-operators-gmnlm" Nov 28 11:51:55 crc kubenswrapper[4923]: I1128 11:51:55.682772 4923 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dzvrz\" (UniqueName: \"kubernetes.io/projected/fd2efc9b-3725-4551-93c3-ad5b3186ce9a-kube-api-access-dzvrz\") pod \"redhat-operators-gmnlm\" (UID: \"fd2efc9b-3725-4551-93c3-ad5b3186ce9a\") " pod="openshift-marketplace/redhat-operators-gmnlm" Nov 28 11:51:55 crc kubenswrapper[4923]: I1128 11:51:55.785135 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fd2efc9b-3725-4551-93c3-ad5b3186ce9a-utilities\") pod \"redhat-operators-gmnlm\" (UID: \"fd2efc9b-3725-4551-93c3-ad5b3186ce9a\") " pod="openshift-marketplace/redhat-operators-gmnlm" Nov 28 11:51:55 crc kubenswrapper[4923]: I1128 11:51:55.785248 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fd2efc9b-3725-4551-93c3-ad5b3186ce9a-catalog-content\") pod \"redhat-operators-gmnlm\" (UID: \"fd2efc9b-3725-4551-93c3-ad5b3186ce9a\") " pod="openshift-marketplace/redhat-operators-gmnlm" Nov 28 11:51:55 crc kubenswrapper[4923]: I1128 11:51:55.785271 4923 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dzvrz\" (UniqueName: \"kubernetes.io/projected/fd2efc9b-3725-4551-93c3-ad5b3186ce9a-kube-api-access-dzvrz\") pod \"redhat-operators-gmnlm\" (UID: \"fd2efc9b-3725-4551-93c3-ad5b3186ce9a\") " pod="openshift-marketplace/redhat-operators-gmnlm" Nov 28 11:51:55 crc kubenswrapper[4923]: I1128 11:51:55.785716 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fd2efc9b-3725-4551-93c3-ad5b3186ce9a-utilities\") pod \"redhat-operators-gmnlm\" (UID: \"fd2efc9b-3725-4551-93c3-ad5b3186ce9a\") " pod="openshift-marketplace/redhat-operators-gmnlm" Nov 28 11:51:55 crc kubenswrapper[4923]: I1128 11:51:55.785746 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fd2efc9b-3725-4551-93c3-ad5b3186ce9a-catalog-content\") pod \"redhat-operators-gmnlm\" (UID: \"fd2efc9b-3725-4551-93c3-ad5b3186ce9a\") " pod="openshift-marketplace/redhat-operators-gmnlm" Nov 28 11:51:55 crc kubenswrapper[4923]: I1128 11:51:55.804862 4923 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-dzvrz\" (UniqueName: \"kubernetes.io/projected/fd2efc9b-3725-4551-93c3-ad5b3186ce9a-kube-api-access-dzvrz\") pod \"redhat-operators-gmnlm\" (UID: \"fd2efc9b-3725-4551-93c3-ad5b3186ce9a\") " pod="openshift-marketplace/redhat-operators-gmnlm" Nov 28 11:51:55 crc kubenswrapper[4923]: I1128 11:51:55.880944 4923 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-gmnlm" Nov 28 11:51:56 crc kubenswrapper[4923]: I1128 11:51:56.336541 4923 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-gmnlm"] Nov 28 11:51:56 crc kubenswrapper[4923]: I1128 11:51:56.889878 4923 generic.go:334] "Generic (PLEG): container finished" podID="fd2efc9b-3725-4551-93c3-ad5b3186ce9a" containerID="3de95b02a15827565a32dbd8d23be41f7f002a46ad367fc9304c3f10a5c0aa90" exitCode=0 Nov 28 11:51:56 crc kubenswrapper[4923]: I1128 11:51:56.889992 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gmnlm" event={"ID":"fd2efc9b-3725-4551-93c3-ad5b3186ce9a","Type":"ContainerDied","Data":"3de95b02a15827565a32dbd8d23be41f7f002a46ad367fc9304c3f10a5c0aa90"} Nov 28 11:51:56 crc kubenswrapper[4923]: I1128 11:51:56.890212 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gmnlm" event={"ID":"fd2efc9b-3725-4551-93c3-ad5b3186ce9a","Type":"ContainerStarted","Data":"e13018e9a8b7bb97bbf9f99f8e66f8c64e8fb2d1f1b0b1ec5f520b38c45262e5"} Nov 28 11:51:56 crc kubenswrapper[4923]: I1128 11:51:56.892272 4923 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 11:51:57 crc kubenswrapper[4923]: I1128 11:51:57.900262 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gmnlm" event={"ID":"fd2efc9b-3725-4551-93c3-ad5b3186ce9a","Type":"ContainerStarted","Data":"81b597bd32a2e88130831226e550cfef2b1e2002848a4ba6b6e20c91211a4e22"} Nov 28 11:51:59 crc kubenswrapper[4923]: I1128 11:51:59.924608 4923 generic.go:334] "Generic (PLEG): container finished" podID="fd2efc9b-3725-4551-93c3-ad5b3186ce9a" containerID="81b597bd32a2e88130831226e550cfef2b1e2002848a4ba6b6e20c91211a4e22" exitCode=0 Nov 28 11:51:59 crc kubenswrapper[4923]: I1128 11:51:59.924713 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gmnlm" event={"ID":"fd2efc9b-3725-4551-93c3-ad5b3186ce9a","Type":"ContainerDied","Data":"81b597bd32a2e88130831226e550cfef2b1e2002848a4ba6b6e20c91211a4e22"} Nov 28 11:52:00 crc kubenswrapper[4923]: I1128 11:52:00.934211 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gmnlm" event={"ID":"fd2efc9b-3725-4551-93c3-ad5b3186ce9a","Type":"ContainerStarted","Data":"73fbf568cc2855c17966b16bb8bd24feaebe4410d8e2f48d48e2cc354ab876b1"} Nov 28 11:52:00 crc kubenswrapper[4923]: I1128 11:52:00.960136 4923 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-gmnlm" podStartSLOduration=2.45202051 podStartE2EDuration="5.960120135s" podCreationTimestamp="2025-11-28 11:51:55 +0000 UTC" firstStartedPulling="2025-11-28 11:51:56.892049329 +0000 UTC m=+2596.020733539" lastFinishedPulling="2025-11-28 11:52:00.400148904 +0000 UTC m=+2599.528833164" observedRunningTime="2025-11-28 11:52:00.955033981 +0000 UTC m=+2600.083718191" watchObservedRunningTime="2025-11-28 11:52:00.960120135 +0000 UTC m=+2600.088804345" Nov 28 11:52:05 crc 
kubenswrapper[4923]: I1128 11:52:05.882191 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-gmnlm" Nov 28 11:52:05 crc kubenswrapper[4923]: I1128 11:52:05.882756 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-gmnlm" Nov 28 11:52:06 crc kubenswrapper[4923]: I1128 11:52:06.948375 4923 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-gmnlm" podUID="fd2efc9b-3725-4551-93c3-ad5b3186ce9a" containerName="registry-server" probeResult="failure" output=< Nov 28 11:52:06 crc kubenswrapper[4923]: timeout: failed to connect service ":50051" within 1s Nov 28 11:52:06 crc kubenswrapper[4923]: > Nov 28 11:52:15 crc kubenswrapper[4923]: I1128 11:52:15.934166 4923 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-gmnlm" Nov 28 11:52:16 crc kubenswrapper[4923]: I1128 11:52:16.013615 4923 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-gmnlm" Nov 28 11:52:16 crc kubenswrapper[4923]: I1128 11:52:16.206347 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-gmnlm"] Nov 28 11:52:17 crc kubenswrapper[4923]: I1128 11:52:17.100239 4923 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-gmnlm" podUID="fd2efc9b-3725-4551-93c3-ad5b3186ce9a" containerName="registry-server" containerID="cri-o://73fbf568cc2855c17966b16bb8bd24feaebe4410d8e2f48d48e2cc354ab876b1" gracePeriod=2 Nov 28 11:52:17 crc kubenswrapper[4923]: I1128 11:52:17.590233 4923 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-gmnlm" Nov 28 11:52:17 crc kubenswrapper[4923]: I1128 11:52:17.768738 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fd2efc9b-3725-4551-93c3-ad5b3186ce9a-utilities\") pod \"fd2efc9b-3725-4551-93c3-ad5b3186ce9a\" (UID: \"fd2efc9b-3725-4551-93c3-ad5b3186ce9a\") " Nov 28 11:52:17 crc kubenswrapper[4923]: I1128 11:52:17.768817 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fd2efc9b-3725-4551-93c3-ad5b3186ce9a-catalog-content\") pod \"fd2efc9b-3725-4551-93c3-ad5b3186ce9a\" (UID: \"fd2efc9b-3725-4551-93c3-ad5b3186ce9a\") " Nov 28 11:52:17 crc kubenswrapper[4923]: I1128 11:52:17.768963 4923 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dzvrz\" (UniqueName: \"kubernetes.io/projected/fd2efc9b-3725-4551-93c3-ad5b3186ce9a-kube-api-access-dzvrz\") pod \"fd2efc9b-3725-4551-93c3-ad5b3186ce9a\" (UID: \"fd2efc9b-3725-4551-93c3-ad5b3186ce9a\") " Nov 28 11:52:17 crc kubenswrapper[4923]: I1128 11:52:17.770176 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fd2efc9b-3725-4551-93c3-ad5b3186ce9a-utilities" (OuterVolumeSpecName: "utilities") pod "fd2efc9b-3725-4551-93c3-ad5b3186ce9a" (UID: "fd2efc9b-3725-4551-93c3-ad5b3186ce9a"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:52:17 crc kubenswrapper[4923]: I1128 11:52:17.773500 4923 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fd2efc9b-3725-4551-93c3-ad5b3186ce9a-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 11:52:17 crc kubenswrapper[4923]: I1128 11:52:17.778577 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fd2efc9b-3725-4551-93c3-ad5b3186ce9a-kube-api-access-dzvrz" (OuterVolumeSpecName: "kube-api-access-dzvrz") pod "fd2efc9b-3725-4551-93c3-ad5b3186ce9a" (UID: "fd2efc9b-3725-4551-93c3-ad5b3186ce9a"). InnerVolumeSpecName "kube-api-access-dzvrz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:52:17 crc kubenswrapper[4923]: I1128 11:52:17.875518 4923 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dzvrz\" (UniqueName: \"kubernetes.io/projected/fd2efc9b-3725-4551-93c3-ad5b3186ce9a-kube-api-access-dzvrz\") on node \"crc\" DevicePath \"\"" Nov 28 11:52:17 crc kubenswrapper[4923]: I1128 11:52:17.915769 4923 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fd2efc9b-3725-4551-93c3-ad5b3186ce9a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "fd2efc9b-3725-4551-93c3-ad5b3186ce9a" (UID: "fd2efc9b-3725-4551-93c3-ad5b3186ce9a"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:52:17 crc kubenswrapper[4923]: I1128 11:52:17.977414 4923 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fd2efc9b-3725-4551-93c3-ad5b3186ce9a-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 11:52:18 crc kubenswrapper[4923]: I1128 11:52:18.113521 4923 generic.go:334] "Generic (PLEG): container finished" podID="fd2efc9b-3725-4551-93c3-ad5b3186ce9a" containerID="73fbf568cc2855c17966b16bb8bd24feaebe4410d8e2f48d48e2cc354ab876b1" exitCode=0 Nov 28 11:52:18 crc kubenswrapper[4923]: I1128 11:52:18.113586 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gmnlm" event={"ID":"fd2efc9b-3725-4551-93c3-ad5b3186ce9a","Type":"ContainerDied","Data":"73fbf568cc2855c17966b16bb8bd24feaebe4410d8e2f48d48e2cc354ab876b1"} Nov 28 11:52:18 crc kubenswrapper[4923]: I1128 11:52:18.113590 4923 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-gmnlm" Nov 28 11:52:18 crc kubenswrapper[4923]: I1128 11:52:18.113643 4923 scope.go:117] "RemoveContainer" containerID="73fbf568cc2855c17966b16bb8bd24feaebe4410d8e2f48d48e2cc354ab876b1" Nov 28 11:52:18 crc kubenswrapper[4923]: I1128 11:52:18.113626 4923 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gmnlm" event={"ID":"fd2efc9b-3725-4551-93c3-ad5b3186ce9a","Type":"ContainerDied","Data":"e13018e9a8b7bb97bbf9f99f8e66f8c64e8fb2d1f1b0b1ec5f520b38c45262e5"} Nov 28 11:52:18 crc kubenswrapper[4923]: I1128 11:52:18.142424 4923 scope.go:117] "RemoveContainer" containerID="81b597bd32a2e88130831226e550cfef2b1e2002848a4ba6b6e20c91211a4e22" Nov 28 11:52:18 crc kubenswrapper[4923]: I1128 11:52:18.162116 4923 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-gmnlm"] Nov 28 11:52:18 crc kubenswrapper[4923]: I1128 11:52:18.166134 4923 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-gmnlm"] Nov 28 11:52:18 crc kubenswrapper[4923]: I1128 11:52:18.190252 4923 scope.go:117] "RemoveContainer" containerID="3de95b02a15827565a32dbd8d23be41f7f002a46ad367fc9304c3f10a5c0aa90" Nov 28 11:52:18 crc kubenswrapper[4923]: I1128 11:52:18.229235 4923 scope.go:117] "RemoveContainer" containerID="73fbf568cc2855c17966b16bb8bd24feaebe4410d8e2f48d48e2cc354ab876b1" Nov 28 11:52:18 crc kubenswrapper[4923]: E1128 11:52:18.230058 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"73fbf568cc2855c17966b16bb8bd24feaebe4410d8e2f48d48e2cc354ab876b1\": container with ID starting with 73fbf568cc2855c17966b16bb8bd24feaebe4410d8e2f48d48e2cc354ab876b1 not found: ID does not exist" containerID="73fbf568cc2855c17966b16bb8bd24feaebe4410d8e2f48d48e2cc354ab876b1" Nov 28 11:52:18 crc kubenswrapper[4923]: I1128 11:52:18.230093 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"73fbf568cc2855c17966b16bb8bd24feaebe4410d8e2f48d48e2cc354ab876b1"} err="failed to get container status \"73fbf568cc2855c17966b16bb8bd24feaebe4410d8e2f48d48e2cc354ab876b1\": rpc error: code = NotFound desc = could not find container \"73fbf568cc2855c17966b16bb8bd24feaebe4410d8e2f48d48e2cc354ab876b1\": container with ID starting with 73fbf568cc2855c17966b16bb8bd24feaebe4410d8e2f48d48e2cc354ab876b1 not found: ID does not exist" Nov 28 11:52:18 crc kubenswrapper[4923]: I1128 11:52:18.230115 4923 scope.go:117] "RemoveContainer" containerID="81b597bd32a2e88130831226e550cfef2b1e2002848a4ba6b6e20c91211a4e22" Nov 28 11:52:18 crc kubenswrapper[4923]: E1128 11:52:18.230539 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"81b597bd32a2e88130831226e550cfef2b1e2002848a4ba6b6e20c91211a4e22\": container with ID starting with 81b597bd32a2e88130831226e550cfef2b1e2002848a4ba6b6e20c91211a4e22 not found: ID does not exist" containerID="81b597bd32a2e88130831226e550cfef2b1e2002848a4ba6b6e20c91211a4e22" Nov 28 11:52:18 crc kubenswrapper[4923]: I1128 11:52:18.230579 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"81b597bd32a2e88130831226e550cfef2b1e2002848a4ba6b6e20c91211a4e22"} err="failed to get container status \"81b597bd32a2e88130831226e550cfef2b1e2002848a4ba6b6e20c91211a4e22\": rpc error: code = NotFound desc = could not find container 
\"81b597bd32a2e88130831226e550cfef2b1e2002848a4ba6b6e20c91211a4e22\": container with ID starting with 81b597bd32a2e88130831226e550cfef2b1e2002848a4ba6b6e20c91211a4e22 not found: ID does not exist" Nov 28 11:52:18 crc kubenswrapper[4923]: I1128 11:52:18.230608 4923 scope.go:117] "RemoveContainer" containerID="3de95b02a15827565a32dbd8d23be41f7f002a46ad367fc9304c3f10a5c0aa90" Nov 28 11:52:18 crc kubenswrapper[4923]: E1128 11:52:18.230865 4923 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3de95b02a15827565a32dbd8d23be41f7f002a46ad367fc9304c3f10a5c0aa90\": container with ID starting with 3de95b02a15827565a32dbd8d23be41f7f002a46ad367fc9304c3f10a5c0aa90 not found: ID does not exist" containerID="3de95b02a15827565a32dbd8d23be41f7f002a46ad367fc9304c3f10a5c0aa90" Nov 28 11:52:18 crc kubenswrapper[4923]: I1128 11:52:18.231004 4923 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3de95b02a15827565a32dbd8d23be41f7f002a46ad367fc9304c3f10a5c0aa90"} err="failed to get container status \"3de95b02a15827565a32dbd8d23be41f7f002a46ad367fc9304c3f10a5c0aa90\": rpc error: code = NotFound desc = could not find container \"3de95b02a15827565a32dbd8d23be41f7f002a46ad367fc9304c3f10a5c0aa90\": container with ID starting with 3de95b02a15827565a32dbd8d23be41f7f002a46ad367fc9304c3f10a5c0aa90 not found: ID does not exist" Nov 28 11:52:19 crc kubenswrapper[4923]: I1128 11:52:19.182639 4923 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fd2efc9b-3725-4551-93c3-ad5b3186ce9a" path="/var/lib/kubelet/pods/fd2efc9b-3725-4551-93c3-ad5b3186ce9a/volumes" var/home/core/zuul-output/logs/crc-cloud-workdir-crc-all-logs.tar.gz0000644000175000000000000000005515112306400024435 0ustar coreroot‹íÁ  ÷Om7 €7šÞ'(var/home/core/zuul-output/logs/crc-cloud/0000755000175000000000000000000015112306401017353 5ustar corerootvar/home/core/zuul-output/artifacts/0000755000175000017500000000000015112300574016503 5ustar corecorevar/home/core/zuul-output/docs/0000755000175000017500000000000015112300574015453 5ustar corecore